Create gemini/gemini_extractor.py
gemini/gemini_extractor.py
ADDED  (+84, -0)
@@ -0,0 +1,84 @@
from typing import List, Union, Optional
from indexify_extractor_sdk import Content, Extractor, Feature
from pydantic import BaseModel, Field
import os
import google.generativeai as genai
from pdf2image import convert_from_path
import tempfile
import mimetypes


class GeminiExtractorConfig(BaseModel):
    model_name: Optional[str] = Field(default='gemini-1.5-flash-latest')
    key: Optional[str] = Field(default=None)
    prompt: str = Field(default='You are a helpful assistant.')
    query: Optional[str] = Field(default=None)


class GeminiExtractor(Extractor):
    name = "tensorlake/gemini"
    description = "An extractor that lets you use LLMs from Gemini."
    system_dependencies = []
    input_mime_types = ["text/plain", "application/pdf", "image/jpeg", "image/png"]

    def __init__(self):
        super().__init__()

    def extract(self, content: Content, params: GeminiExtractorConfig) -> List[Union[Feature, Content]]:
        contents = []
        model_name = params.model_name
        key = params.key
        prompt = params.prompt
        query = params.query

        if content.content_type in ["application/pdf"]:
            # Write the PDF to a temp file and render each page to a JPEG,
            # since the file is uploaded to Gemini as images rather than raw PDF bytes.
            with tempfile.NamedTemporaryFile(delete=False, suffix=".pdf") as temp_file:
                temp_file.write(content.data)
                file_path = temp_file.name
            images = convert_from_path(file_path)
            image_files = []
            for image in images:
                with tempfile.NamedTemporaryFile(delete=False, suffix=".jpg") as temp_image_file:
                    image.save(temp_image_file.name, 'JPEG')
                    image_files.append(temp_image_file.name)
        elif content.content_type in ["image/jpeg", "image/png"]:
            image_files = []
            suffix = mimetypes.guess_extension(content.content_type)
            with tempfile.NamedTemporaryFile(delete=False, suffix=suffix) as temp_image_file:
                temp_image_file.write(content.data)
                file_path = temp_image_file.name
                image_files.append(file_path)
        else:
            # Plain text: fall back to using the content itself as the query
            # when no explicit query was provided.
            text = content.data.decode("utf-8")
            if query is None:
                query = text
            file_path = None

        def upload_to_gemini(path, mime_type=None):
            file = genai.upload_file(path, mime_type=mime_type)
            print(f"Uploaded file '{file.display_name}' as: {file.uri}")
            return file

        if ('GEMINI_API_KEY' not in os.environ) and (key is None):
            response_content = "No API key found: set the GEMINI_API_KEY environment variable or pass `key` in the config."
        else:
            if ('GEMINI_API_KEY' in os.environ) and (key is None):
                genai.configure(api_key=os.environ["GEMINI_API_KEY"])
            else:
                genai.configure(api_key=key)
            generation_config = {
                "temperature": 1,
                "top_p": 0.95,
                "top_k": 64,
                "max_output_tokens": 8192,
                "response_mime_type": "text/plain",
            }
            model = genai.GenerativeModel(
                model_name=model_name,
                generation_config=generation_config,
            )
            if file_path:
                # Upload each file with its actual mime type: PDF pages were
                # saved as JPEGs, standalone images keep their original type.
                files = [
                    upload_to_gemini(image_file, mime_type=mimetypes.guess_type(image_file)[0])
                    for image_file in image_files
                ]
                chat_session = model.start_chat(history=[{"role": "user", "parts": files}])
                response = chat_session.send_message(prompt)
            else:
                chat_session = model.start_chat(history=[])
                response = chat_session.send_message(prompt + " " + query)

            response_content = response.text

        contents.append(Content.from_text(response_content))

        return contents

    def sample_input(self) -> Content:
        return Content.from_text("Hello world, I am a good boy.")
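
For a quick local check of the extractor, a minimal sketch along these lines should work, assuming the indexify_extractor_sdk and google-generativeai packages are installed and GEMINI_API_KEY is exported; the prompt value is illustrative, not part of the commit:

    # Local smoke test for the extractor above (illustrative sketch).
    # Assumes GEMINI_API_KEY is set in the environment.
    from gemini.gemini_extractor import GeminiExtractor, GeminiExtractorConfig

    extractor = GeminiExtractor()
    params = GeminiExtractorConfig(prompt="Reply to the following text politely.")

    # sample_input() returns plain-text Content, so extract() takes the
    # text branch: it sends prompt + " " + text to Gemini and wraps the
    # model's reply in a new Content.
    for result in extractor.extract(extractor.sample_input(), params):
        print(result.data.decode("utf-8"))

Passing a PDF or image Content instead would exercise the upload path, where each page or image is uploaded via genai.upload_file before the prompt is sent.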