Kaan committed on
Commit
097833a
·
verified ·
1 Parent(s): 84927e5

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +7 -0
app.py CHANGED
@@ -17,9 +17,16 @@ async def generate_text():
17
 
18
  hf_hub_download(repo_id="TheBloke/Mistral-7B-v0.1-GGUF", filename="mistral-7b-v0.1.Q4_K_M.gguf", local_dir=model_dir)
19
 
 
 
 
 
 
 
20
  tokenizer = AutoTokenizer.from_pretrained(model_dir)
21
  model = AutoModelForCausalLM.from_pretrained(model_dir)
22
 
 
23
  prompt = "Once upon a time, there was a"
24
  inputs = tokenizer(prompt, return_tensors="pt")
25
  output = model.generate(input_ids=inputs["input_ids"], max_length=50, num_return_sequences=3, temperature=0.7)
 
17
 
18
  hf_hub_download(repo_id="TheBloke/Mistral-7B-v0.1-GGUF", filename="mistral-7b-v0.1.Q4_K_M.gguf", local_dir=model_dir)
19
 
20
+ # Check if config.json file exists in the model directory
21
+ config_file = os.path.join(model_dir, "config.json")
22
+ if not os.path.exists(config_file):
23
+ raise ValueError("config.json file is missing in the model directory")
24
+
25
+ # Load tokenizer and model
26
  tokenizer = AutoTokenizer.from_pretrained(model_dir)
27
  model = AutoModelForCausalLM.from_pretrained(model_dir)
28
 
29
+ # Generate text
30
  prompt = "Once upon a time, there was a"
31
  inputs = tokenizer(prompt, return_tensors="pt")
32
  output = model.generate(input_ids=inputs["input_ids"], max_length=50, num_return_sequences=3, temperature=0.7)