	Update app.py
app.py CHANGED
    
@@ -1,222 +1,251 @@
"""
Minimal OpenAI-compatible local server that serves LiquidAI/LFM2-1.2B via Hugging Face
Transformers on CPU and exposes a subset of the OpenAI REST API (chat/completions, models).

Save as local_openai_compatible_server.py and run:
    pip install -r requirements.txt
    python local_openai_compatible_server.py

Or run with uvicorn directly (recommended for production/dev):
    uvicorn local_openai_compatible_server:app --host 0.0.0.0 --port 7860

Requirements (requirements.txt):
    fastapi
    uvicorn[standard]
    transformers
    torch

Notes:
- CPU-only: model loads on CPU (may be slow for a 1.2B model depending on your machine).
- Model repo id used: "LiquidAI/LFM2-1.2B"; adjust if you have a different path or local copy.
- This provides a simplified compatibility layer. It is NOT feature-complete with OpenAI's API
  but implements common fields: messages, max_tokens, temperature, top_p, n, stop, stream (basic).
"""

from fastapi import FastAPI, Request, HTTPException
from fastapi.responses import JSONResponse, StreamingResponse, PlainTextResponse
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
from typing import List, Optional, Any, Dict
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
import time
import json
import uuid

# -----------------------------
# Configuration
# -----------------------------
MODEL_ID = "LiquidAI/LFM2-1.2B"  # change to your model location or HF repo
HOST = "0.0.0.0"
PORT = 7860
DEVICE = torch.device("cpu")  # CPU-only as requested
DEFAULT_MAX_TOKENS = 256

# -----------------------------
# Load model & tokenizer
# -----------------------------
print(f"Loading tokenizer and model '{MODEL_ID}' on device {DEVICE} (CPU-only)... this may take a while")
try:
    tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, use_fast=True)
    model = AutoModelForCausalLM.from_pretrained(MODEL_ID, torch_dtype=torch.float32)
    model.to(DEVICE)
    model.eval()
except Exception as e:
    raise RuntimeError(f"Failed to load model/tokenizer for '{MODEL_ID}': {e}")

# If tokenizer has no pad/eos, try to set sensible defaults
if tokenizer.pad_token_id is None:
    if tokenizer.eos_token_id is not None:
        tokenizer.pad_token_id = tokenizer.eos_token_id

# -----------------------------
# FastAPI app
# -----------------------------
app = FastAPI(title="Local OpenAI-compatible server (transformers)", version="0.1")
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# -----------------------------
# Pydantic models (request bodies)
# -----------------------------
class Message(BaseModel):
    role: str
    content: str

class ChatCompletionRequest(BaseModel):
    model: Optional[str] = MODEL_ID
    messages: List[Message]
    max_tokens: Optional[int] = DEFAULT_MAX_TOKENS
    temperature: Optional[float] = 0.0
    top_p: Optional[float] = 1.0
    n: Optional[int] = 1
    stop: Optional[List[str]] = None
    stream: Optional[bool] = False

# -----------------------------
# Helpers
# -----------------------------
def build_prompt_from_messages(messages: List[Dict[str, Any]]) -> str:
    # Simple conversational prompt formatting. Adjust to suit model's expected format.
    parts = []
    for m in messages:
        role = m.get("role", "user")
        content = m.get("content", "")
        if role == "system":
            parts.append(f"<|system|> {content}\n")
        elif role == "user":
            parts.append(f"User: {content}\n")
        elif role == "assistant":
            parts.append(f"Assistant: {content}\n")
        else:
            parts.append(f"{role}: {content}\n")
    parts.append("Assistant: ")
    return "".join(parts)


def apply_stop_sequences(text: str, stops: Optional[List[str]]) -> str:
    if not stops:
        return text
    idx = None
    for s in stops:
        if s == "":
            continue
        pos = text.find(s)
        if pos != -1:
            if idx is None or pos < idx:
                idx = pos
    if idx is not None:
        return text[:idx]
    return text

# -----------------------------
# Endpoints
# -----------------------------
@app.get("/", response_class=PlainTextResponse)
async def root():
    return "Local OpenAI-compatible server running. Use /v1/chat/completions or /v1/models"

@app.get("/v1/models")
async def list_models():
    return {"data": [{"id": MODEL_ID, "object": "model"}], "object": "list"}

@app.post("/v1/chat/completions")
async def chat_completions(request: Request, body: ChatCompletionRequest):
    # Basic validation
    if body.model is None or body.model != MODEL_ID:
        # Only the configured MODEL_ID is served; reject any other model id
        raise HTTPException(status_code=400, detail={"error": "invalid_model", "message": f"Only model {MODEL_ID} is available on this server."})

    prompt = build_prompt_from_messages([m.dict() for m in body.messages])

    # Tokenize
    inputs = tokenizer(prompt, return_tensors="pt")
    input_ids = inputs["input_ids"].to(DEVICE)
    input_len = input_ids.shape[-1]

    # Generation settings
    gen_kwargs = {
        "max_new_tokens": body.max_tokens,
        "do_sample": bool(body.temperature and body.temperature > 0.0),
        "temperature": float(body.temperature or 0.0),
        "top_p": float(body.top_p or 1.0),
        "num_return_sequences": int(body.n or 1),
        "pad_token_id": tokenizer.pad_token_id or tokenizer.eos_token_id,
        # note: on CPU large models may be slow
    }

    # Synchronous generation
    with torch.no_grad():
        outputs = model.generate(input_ids, **gen_kwargs)

    choices = []
    for i, out_ids in enumerate(outputs):
        full_text = tokenizer.decode(out_ids, skip_special_tokens=True)
        # Attempt to strip the prompt prefix to return only generated reply
        # find the last occurrence of the prompt in full_text (best-effort)
        stripped = full_text
        try:
            # prefer exact match; fallback to trimming by token count
            if prompt.strip() and prompt in full_text:
                stripped = full_text.split(prompt, 1)[1]
            else:
                # fallback: remove first input_len tokens from decoded sequence
                decoded_all = full_text
                # naive fallback: no-op (we keep the full_text)
                stripped = decoded_all
        except Exception:
            stripped = full_text

        # apply stop sequences
        stripped = apply_stop_sequences(stripped, body.stop)

        # build choice structure similar to OpenAI
        choice = {
            "index": i,
            "message": {"role": "assistant", "content": stripped},
            "finish_reason": "stop" if body.stop else "length",
        }
        choices.append(choice)

    # approximate token usage
    completion_tokens = max(0, (outputs.shape[-1] - input_len) if outputs is not None else 0)
    usage = {"prompt_tokens": int(input_len), "completion_tokens": int(completion_tokens), "total_tokens": int(input_len + completion_tokens)}

    response = {
        "id": str(uuid.uuid4()),
        "object": "chat.completion",
        "created": int(time.time()),
        "model": body.model,
        "choices": choices,
        "usage": usage,
    }

    # Streaming: rudimentary implementation that streams chunks of the final text as SSE
    if body.stream:
        # Only support streaming a single response (n > 1 will still stream the first)
        text_to_stream = choices[0]["message"]["content"]
        def event_stream():
            # send a few small chunks
            chunk_size = 128
            for start in range(0, len(text_to_stream), chunk_size):
                chunk = text_to_stream[start:start+chunk_size]
                payload = {"id": response["id"], "object": "chat.completion.chunk", "choices": [{"delta": {"content": chunk}, "index": 0}]}
                yield f"data: {json.dumps(payload)}\n\n"
            # final done message
            done_payload = {"id": response["id"], "object": "chat.completion.chunk", "choices": [{"delta": {}, "index": 0}], "done": True}
            yield f"data: {json.dumps(done_payload)}\n\n"
        return StreamingResponse(event_stream(), media_type="text/event-stream")

    return JSONResponse(response)

# A convenience POST /v1/completions that accepts 'prompt' (legacy completions API)
class CompletionRequest(BaseModel):
    model: Optional[str] = MODEL_ID
    prompt: Optional[str] = ""
    max_tokens: Optional[int] = DEFAULT_MAX_TOKENS
    temperature: Optional[float] = 0.0
    top_p: Optional[float] = 1.0
    n: Optional[int] = 1
    stop: Optional[List[str]] = None
    stream: Optional[bool] = False

@app.post("/v1/completions")
async def completions(req: CompletionRequest):
    # wrap prompt into the chat-format for our generator
    messages = [Message(role="user", content=req.prompt)]
    chat_req = ChatCompletionRequest(model=req.model, messages=messages, max_tokens=req.max_tokens, temperature=req.temperature, top_p=req.top_p, n=req.n, stop=req.stop, stream=req.stream)
    # call the chat_completions handler directly
    return await chat_completions(Request(scope={"type": "http"}), chat_req)

# -----------------------------
# If executed directly, run uvicorn
# -----------------------------
if __name__ == "__main__":
    import uvicorn
    uvicorn.run("local_openai_compatible_server:app", host=HOST, port=PORT, log_level="info")
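
Once the server is running (via python local_openai_compatible_server.py or the uvicorn command shown in the docstring), any HTTP client can exercise it. The snippet below is a minimal sketch of a non-streaming call to /v1/chat/completions; it assumes the server is listening on localhost:7860 and uses the requests library, which is not part of the server's requirements.txt.

# client_example.py - hypothetical client sketch, not part of app.py
import requests

BASE_URL = "http://localhost:7860"  # matches HOST/PORT in the server configuration

payload = {
    "model": "LiquidAI/LFM2-1.2B",  # must match the server's MODEL_ID or a 400 is returned
    "messages": [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Say hello in one short sentence."},
    ],
    "max_tokens": 64,
    "temperature": 0.7,
}

resp = requests.post(f"{BASE_URL}/v1/chat/completions", json=payload, timeout=600)
resp.raise_for_status()
data = resp.json()

# The body mirrors the chat.completion structure assembled by the server above.
print(data["choices"][0]["message"]["content"])
print(data["usage"])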

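For the basic streaming path (stream set to true), the server emits SSE-style "data: {...}" lines and a final chunk flagged with "done". A rough sketch of consuming that stream, again with the assumed requests dependency:

# streaming_client_example.py - hypothetical sketch for the server's simple SSE output
import json
import requests

payload = {
    "model": "LiquidAI/LFM2-1.2B",  # must match the server's MODEL_ID
    "messages": [{"role": "user", "content": "Stream a short greeting."}],
    "max_tokens": 64,
    "stream": True,
}

with requests.post("http://localhost:7860/v1/chat/completions", json=payload,
                   stream=True, timeout=600) as resp:
    resp.raise_for_status()
    for line in resp.iter_lines(decode_unicode=True):
        if not line or not line.startswith("data: "):
            continue
        chunk = json.loads(line[len("data: "):])
        if chunk.get("done"):
            break
        delta = chunk["choices"][0]["delta"]
        print(delta.get("content", ""), end="", flush=True)
print()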