Commit 68e1082 · Parent(s): ba88b04

restore to live status

Files changed: utils/models.py (+17 -17)

utils/models.py CHANGED
    
@@ -17,25 +17,25 @@ from .prompts import format_rag_prompt
 from .shared import generation_interrupt
 
 models = {
-
-
-
-
-
-
-
-
-
-
-    "Bitnet-b1.58-2B4T": "microsoft/bitnet-b1.58-2B-4T",
+    "Qwen2.5-1.5b-Instruct": "qwen/qwen2.5-1.5b-instruct",
+    "Qwen2.5-3b-Instruct": "qwen/qwen2.5-3b-instruct",
+    "Llama-3.2-1b-Instruct": "meta-llama/llama-3.2-1b-instruct",
+    "Llama-3.2-3b-Instruct": "meta-llama/llama-3.2-3b-instruct",
+    "Gemma-3-1b-it": "google/gemma-3-1b-it",
+    "Gemma-3-4b-it": "google/gemma-3-4b-it",
+    "Gemma-2-2b-it": "google/gemma-2-2b-it",
+    "Phi-4-mini-instruct": "microsoft/phi-4-mini-instruct",
+    "Cogito-v1-preview-llama-3b": "deepcogito/cogito-v1-preview-llama-3b",
+    "IBM Granite-3.3-2b-instruct": "ibm-granite/granite-3.3-2b-instruct",
+    # "Bitnet-b1.58-2B4T": "microsoft/bitnet-b1.58-2B-4T",
     # #"MiniCPM3-RAG-LoRA": "openbmb/MiniCPM3-RAG-LoRA",
     "Qwen3-0.6b": "qwen/qwen3-0.6b",
-
-
-
-
-
-
+    "Qwen3-1.7b": "qwen/qwen3-1.7b",
+    "Qwen3-4b": "qwen/qwen3-4b",
+    "SmolLM2-1.7b-Instruct": "HuggingFaceTB/SmolLM2-1.7B-Instruct",
+    "EXAONE-3.5-2.4B-instruct": "LGAI-EXAONE/EXAONE-3.5-2.4B-Instruct",
+    "OLMo-2-1B-Instruct": "allenai/OLMo-2-0425-1B-Instruct",
+    "icecream-3b": "aizip-dev/icecream-3b",
 }
 
 tokenizer_cache = {}
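For context, the restored `models` dictionary maps display names to Hugging Face repo ids, and the adjacent `tokenizer_cache` suggests tokenizers are loaded once and then reused. Below is a minimal sketch of how these two objects might be consumed; the `get_tokenizer` helper and the use of `transformers.AutoTokenizer` are assumptions for illustration, since only the dictionaries appear in this diff.

# Minimal sketch (not part of this commit): one plausible way utils/models.py
# could use the restored `models` mapping together with `tokenizer_cache`.
# The helper name `get_tokenizer` and the use of transformers.AutoTokenizer
# are assumptions; only the two dictionaries appear in the diff above.
from transformers import AutoTokenizer

models = {
    "Qwen2.5-1.5b-Instruct": "qwen/qwen2.5-1.5b-instruct",
    "Qwen3-0.6b": "qwen/qwen3-0.6b",
    # ... remaining entries as listed in the diff above
}

tokenizer_cache = {}

def get_tokenizer(display_name: str):
    # Resolve the display name to its Hugging Face repo id, loading the
    # tokenizer only on first use and reusing the cached instance afterwards.
    repo_id = models[display_name]
    if repo_id not in tokenizer_cache:
        tokenizer_cache[repo_id] = AutoTokenizer.from_pretrained(repo_id)
    return tokenizer_cache[repo_id]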