Radamés Ajna
		
	committed on
		
		
					Commit 
							
							·
						
						dac7840
	
0
								Parent(s):
							
							
Duplicate from radames/segment-anything-embeddings-base
Browse files- .gitattributes +34 -0
- README.md +8 -0
- pipeline.py +24 -0
- requirements.txt +4 -0
    	
        .gitattributes
    ADDED
    
    | @@ -0,0 +1,34 @@ | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | 
|  | |
| 1 | 
            +
            *.7z filter=lfs diff=lfs merge=lfs -text
         | 
| 2 | 
            +
            *.arrow filter=lfs diff=lfs merge=lfs -text
         | 
| 3 | 
            +
            *.bin filter=lfs diff=lfs merge=lfs -text
         | 
| 4 | 
            +
            *.bz2 filter=lfs diff=lfs merge=lfs -text
         | 
| 5 | 
            +
            *.ckpt filter=lfs diff=lfs merge=lfs -text
         | 
| 6 | 
            +
            *.ftz filter=lfs diff=lfs merge=lfs -text
         | 
| 7 | 
            +
            *.gz filter=lfs diff=lfs merge=lfs -text
         | 
| 8 | 
            +
            *.h5 filter=lfs diff=lfs merge=lfs -text
         | 
| 9 | 
            +
            *.joblib filter=lfs diff=lfs merge=lfs -text
         | 
| 10 | 
            +
            *.lfs.* filter=lfs diff=lfs merge=lfs -text
         | 
| 11 | 
            +
            *.mlmodel filter=lfs diff=lfs merge=lfs -text
         | 
| 12 | 
            +
            *.model filter=lfs diff=lfs merge=lfs -text
         | 
| 13 | 
            +
            *.msgpack filter=lfs diff=lfs merge=lfs -text
         | 
| 14 | 
            +
            *.npy filter=lfs diff=lfs merge=lfs -text
         | 
| 15 | 
            +
            *.npz filter=lfs diff=lfs merge=lfs -text
         | 
| 16 | 
            +
            *.onnx filter=lfs diff=lfs merge=lfs -text
         | 
| 17 | 
            +
            *.ot filter=lfs diff=lfs merge=lfs -text
         | 
| 18 | 
            +
            *.parquet filter=lfs diff=lfs merge=lfs -text
         | 
| 19 | 
            +
            *.pb filter=lfs diff=lfs merge=lfs -text
         | 
| 20 | 
            +
            *.pickle filter=lfs diff=lfs merge=lfs -text
         | 
| 21 | 
            +
            *.pkl filter=lfs diff=lfs merge=lfs -text
         | 
| 22 | 
            +
            *.pt filter=lfs diff=lfs merge=lfs -text
         | 
| 23 | 
            +
            *.pth filter=lfs diff=lfs merge=lfs -text
         | 
| 24 | 
            +
            *.rar filter=lfs diff=lfs merge=lfs -text
         | 
| 25 | 
            +
            *.safetensors filter=lfs diff=lfs merge=lfs -text
         | 
| 26 | 
            +
            saved_model/**/* filter=lfs diff=lfs merge=lfs -text
         | 
| 27 | 
            +
            *.tar.* filter=lfs diff=lfs merge=lfs -text
         | 
| 28 | 
            +
            *.tflite filter=lfs diff=lfs merge=lfs -text
         | 
| 29 | 
            +
            *.tgz filter=lfs diff=lfs merge=lfs -text
         | 
| 30 | 
            +
            *.wasm filter=lfs diff=lfs merge=lfs -text
         | 
| 31 | 
            +
            *.xz filter=lfs diff=lfs merge=lfs -text
         | 
| 32 | 
            +
            *.zip filter=lfs diff=lfs merge=lfs -text
         | 
| 33 | 
            +
            *.zst filter=lfs diff=lfs merge=lfs -text
         | 
| 34 | 
            +
            *tfevents* filter=lfs diff=lfs merge=lfs -text
         | 
    	
        README.md
    ADDED
    
    | @@ -0,0 +1,8 @@ | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | 
|  | |
| 1 | 
            +
            ---
         | 
| 2 | 
            +
            license: apache-2.0
         | 
| 3 | 
            +
            tags:
         | 
| 4 | 
            +
            - feature-extraction
         | 
| 5 | 
            +
            library_name: generic
         | 
| 6 | 
            +
            pipeline_tag: image-classification
         | 
| 7 | 
            +
            duplicated_from: radames/segment-anything-embeddings-base
         | 
| 8 | 
            +
            ---
         | 
    	
        pipeline.py
    ADDED
    
    | @@ -0,0 +1,24 @@ | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | 
|  | |
| 1 | 
            +
            from typing import List
         | 
| 2 | 
            +
            import torch
         | 
| 3 | 
            +
            from transformers import SamModel, SamProcessor
         | 
| 4 | 
            +
            from PIL import Image
         | 
| 5 | 
            +
            import numpy as np
         | 
| 6 | 
            +
             | 
class PreTrainedPipeline():
    """Generic HF Inference pipeline that returns SAM image embeddings.

    Loads facebook/sam-vit-base once at construction and, per call,
    produces the raw image embedding tensor as nested Python lists.
    """

    def __init__(self, path=""):
        # `path` is unused but kept for interface compatibility: the generic
        # inference toolkit instantiates PreTrainedPipeline(path).
        self.device = torch.device(
            "cuda" if torch.cuda.is_available() else "cpu")
        self.processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
        self.model = SamModel.from_pretrained(
            "facebook/sam-vit-base").to(self.device)
        # Inference-only pipeline: disable dropout/batchnorm training behavior.
        self.model.eval()
        # NOTE: the original also did `self.model = self.model.to(self.device)`
        # here a second time — redundant, removed.

    def __call__(self, inputs: "Image.Image") -> List[float]:
        """Compute the SAM image embedding for one PIL image.

        Args:
            inputs: a PIL image; converted to RGB before preprocessing.

        Returns:
            Nested Python lists from the embedding tensor's ``.tolist()``
            (presumably shape (1, 256, 64, 64) for sam-vit-base — confirm
            against the transformers SAM docs).
        """
        raw_image = inputs.convert("RGB")
        # Renamed local (was rebinding the `inputs` parameter).
        batch = self.processor(raw_image, return_tensors="pt").to(self.device)
        # no_grad: values are identical, but we avoid building an autograd
        # graph and holding activations in memory during inference.
        with torch.no_grad():
            feature_vector = self.model.get_image_embeddings(
                batch["pixel_values"])

        return feature_vector.tolist()
    	
        requirements.txt
    ADDED
    
    | @@ -0,0 +1,4 @@ | |
|  | |
|  | |
|  | |
|  | 
|  | |
| 1 | 
            +
            git+https://github.com/huggingface/transformers.git
         | 
| 2 | 
            +
            torch
         | 
| 3 | 
            +
            numpy
         | 
| 4 | 
            +
            Pillow
         | 
