Spaces:
Running
Running
# FIX: the class is FastAPI (capital "API"), not FastApi — the original
# import raised ImportError at startup.
from fastapi import FastAPI, Request
from pydantic import BaseModel
from transformers import pipeline, AutoTokenizer, AutoModelForSeq2SeqLM
# FIX: CORSMiddleware is exported from fastapi.middleware.cors, not
# fastapi.middleware — the original import path raised ImportError.
from fastapi.middleware.cors import CORSMiddleware

app = FastAPI()

# Allow cross-origin browser requests from any origin (open CORS policy,
# typical for a public demo Space).
# FIX: the original passed allow_methods twice — a repeated keyword
# argument is a SyntaxError; the second occurrence was meant to be
# allow_headers.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_methods=["*"],
    allow_headers=["*"],
)
# Checkpoint fine-tuned for book summarization; per its name it is a
# LongT5 (tglobal-base) variant with a 16384-token context, matching the
# max_length used at tokenization time below.
model_name = "pszemraj/long-t5-tglobal-base-16384-book-summary"
# Loaded once at module import so every request reuses the same weights
# (downloads from the Hugging Face Hub on first run).
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
class InputText(BaseModel):
    """Request body schema: the raw text to summarize."""

    # Full document text; truncated to the model's context window at
    # tokenization time in the handler.
    text: str
# FIX: the original function had no route decorator, so the endpoint was
# never registered with the app and could not be reached over HTTP.
@app.post("/summarize")
async def summarize(input: InputText):
    """Summarize ``input.text`` with the LongT5 book-summary model.

    Parameters:
        input: request body carrying the raw text to summarize.

    Returns:
        The decoded summary string (beam search, 50-1024 generated tokens).
    """
    # Tokenize, truncating to the model's 16384-token context window.
    inputs = tokenizer(
        input.text,
        return_tensors="pt",
        max_length=16384,
        truncation=True,
    )
    # NOTE(review): model.generate is CPU/GPU-blocking; inside an async
    # handler it stalls the event loop for the whole generation. Consider
    # a plain `def` handler (FastAPI runs it in a threadpool) for
    # production use.
    summary_ids = model.generate(
        inputs["input_ids"],
        # FIX: forward the attention mask so any padding is ignored;
        # omitting it makes transformers emit a warning and can degrade
        # output when padding is present.
        attention_mask=inputs["attention_mask"],
        max_length=1024,
        min_length=50,
        length_penalty=2.0,  # >1.0 biases beam search toward longer summaries
        num_beams=4,  # beam search for higher-quality output
        early_stopping=True,
    )
    summary = tokenizer.decode(summary_ids[0], skip_special_tokens=True)
    return summary