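# FastAPI service that returns text completions from a locally cached
# Mistral-7B GGUF checkpoint loaded with Hugging Face transformers.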
from fastapi import FastAPI, HTTPException
from transformers import AutoModelForCausalLM, AutoTokenizer
import os

app = FastAPI()

model_id = "TheBloke/Mistral-7B-v0.1-GGUF"
# Any of the quantized files in the repo can be used here; Q4_K_M is an assumption.
gguf_file = "mistral-7b-v0.1.Q4_K_M.gguf"

# The Hub cache stores repos under hub/models--<org>--<name>, not <org>/<name>.
cache_dir = os.path.expanduser("~/.cache/huggingface/")
model_folder = os.path.join(cache_dir, "hub", "models--" + model_id.replace("/", "--"))
if not os.path.exists(model_folder):
    raise FileNotFoundError("Model not found in cache directory. Please download the model.")

# Load the tokenizer and model once at startup. This blocks until loading finishes;
# GGUF checkpoints need the gguf_file argument (supported in transformers >= 4.41).
tokenizer = AutoTokenizer.from_pretrained(model_id, gguf_file=gguf_file)
model = AutoModelForCausalLM.from_pretrained(model_id, gguf_file=gguf_file)

@app.get("/")
def generate_text():
    # Plain `def` rather than `async def`: FastAPI runs sync handlers in a worker
    # thread, so the blocking generate() call does not stall the event loop.
    try:
        prompt = "Once upon a time, there was a"
        inputs = tokenizer(prompt, return_tensors="pt")
        output = model.generate(
            input_ids=inputs["input_ids"],
            max_length=50,
            num_return_sequences=3,
            do_sample=True,   # sampling is required for temperature to take effect
            temperature=0.7,  # and for num_return_sequences > 1 to be valid
        )
        generated_texts = tokenizer.batch_decode(output, skip_special_tokens=True)
        return generated_texts
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
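

# To satisfy the cache check above, the GGUF file can be fetched ahead of time.
# A minimal sketch, assuming the Q4_K_M quantization (any file in the repo works):
#
#     from huggingface_hub import hf_hub_download
#     hf_hub_download(repo_id="TheBloke/Mistral-7B-v0.1-GGUF",
#                     filename="mistral-7b-v0.1.Q4_K_M.gguf")

# Serve the API locally with uvicorn (assumed to be installed alongside fastapi):
if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="127.0.0.1", port=8000)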