zpsajst committed on
Commit
64bb768
·
1 Parent(s): 2398be6

Fix: Auto-download models on Render deployment

Browse files
Files changed (2) hide show
  1. combined_server.py +52 -38
  2. render.yaml +2 -0
combined_server.py CHANGED
@@ -154,18 +154,30 @@ print(f"πŸ“± Using device: {device}")
154
 
155
  print("πŸš€ Loading AI models...")
156
 
 
 
 
 
 
 
 
 
 
 
 
 
157
  # RoBERTa for fake news detection
158
  print("Loading RoBERTa fake news detector...")
159
  try:
160
  roberta_tokenizer = AutoTokenizer.from_pretrained(
161
  "hamzab/roberta-fake-news-classification",
162
- cache_dir=r'D:\huggingface_cache',
163
- local_files_only=True
164
  )
165
  roberta_model = AutoModelForSequenceClassification.from_pretrained(
166
  "hamzab/roberta-fake-news-classification",
167
- cache_dir=r'D:\huggingface_cache',
168
- local_files_only=True
169
  ).to(device)
170
  roberta_model.eval() # Set to evaluation mode
171
  print("βœ… RoBERTa loaded")
@@ -177,13 +189,13 @@ except Exception as e:
177
  print("Loading emotion classifier...")
178
  emotion_tokenizer = AutoTokenizer.from_pretrained(
179
  "j-hartmann/emotion-english-distilroberta-base",
180
- cache_dir=r'D:\huggingface_cache',
181
- local_files_only=True
182
  )
183
  emotion_model = AutoModelForSequenceClassification.from_pretrained(
184
  "j-hartmann/emotion-english-distilroberta-base",
185
- cache_dir=r'D:\huggingface_cache',
186
- local_files_only=True
187
  ).to(device)
188
  print("βœ… Emotion model loaded")
189
 
@@ -279,13 +291,13 @@ def load_ner_model():
279
  print("πŸ”„ Loading NER model...")
280
  ner_tokenizer = AutoTokenizer.from_pretrained(
281
  "dslim/bert-base-NER",
282
- cache_dir=r'D:\huggingface_cache',
283
- local_files_only=True
284
  )
285
  ner_model = AutoModelForTokenClassification.from_pretrained(
286
  "dslim/bert-base-NER",
287
- cache_dir=r'D:\huggingface_cache',
288
- local_files_only=True
289
  ).to(device)
290
  print("βœ… NER model loaded")
291
 
@@ -296,13 +308,13 @@ def load_hate_speech_model():
296
  print("πŸ”„ Loading Hate Speech detector...")
297
  hate_speech_tokenizer = AutoTokenizer.from_pretrained(
298
  "facebook/roberta-hate-speech-dynabench-r4-target",
299
- cache_dir=r'D:\huggingface_cache',
300
- local_files_only=True
301
  )
302
  hate_speech_model = AutoModelForSequenceClassification.from_pretrained(
303
  "facebook/roberta-hate-speech-dynabench-r4-target",
304
- cache_dir=r'D:\huggingface_cache',
305
- local_files_only=True
306
  ).to(device)
307
  print("βœ… Hate Speech detector loaded")
308
 
@@ -313,13 +325,13 @@ def load_clickbait_model():
313
  print("πŸ”„ Loading Clickbait detector...")
314
  clickbait_tokenizer = AutoTokenizer.from_pretrained(
315
  "elozano/bert-base-cased-clickbait-news",
316
- cache_dir=r'D:\huggingface_cache',
317
- local_files_only=True
318
  )
319
  clickbait_model = AutoModelForSequenceClassification.from_pretrained(
320
  "elozano/bert-base-cased-clickbait-news",
321
- cache_dir=r'D:\huggingface_cache',
322
- local_files_only=True
323
  ).to(device)
324
  print("βœ… Clickbait detector loaded")
325
 
@@ -330,13 +342,13 @@ def load_bias_model():
330
  print("πŸ”„ Loading Bias detector...")
331
  bias_tokenizer = AutoTokenizer.from_pretrained(
332
  "valurank/distilroberta-bias",
333
- cache_dir=r'D:\huggingface_cache',
334
- local_files_only=True
335
  )
336
  bias_model = AutoModelForSequenceClassification.from_pretrained(
337
  "valurank/distilroberta-bias",
338
- cache_dir=r'D:\huggingface_cache',
339
- local_files_only=True
340
  ).to(device)
341
  print("βœ… Bias detector loaded")
342
 
@@ -348,13 +360,13 @@ def load_fake_news_bert_model():
348
  print("πŸ”„ Loading Fake News BERT #2...")
349
  fake_news_bert_tokenizer = AutoTokenizer.from_pretrained(
350
  "jy46604790/Fake-News-Bert-Detect",
351
- cache_dir=r'D:\huggingface_cache',
352
- local_files_only=True
353
  )
354
  fake_news_bert_model = AutoModelForSequenceClassification.from_pretrained(
355
  "jy46604790/Fake-News-Bert-Detect",
356
- cache_dir=r'D:\huggingface_cache',
357
- local_files_only=True
358
  ).to(device)
359
  fake_news_bert_model.eval()
360
  print("βœ… Fake News BERT #2 loaded")
@@ -369,13 +381,13 @@ def load_fake_news_pulk_model():
369
  print("πŸ”„ Loading Fake News Pulk17...")
370
  fake_news_pulk_tokenizer = AutoTokenizer.from_pretrained(
371
  "Pulk17/Fake-News-Detection",
372
- cache_dir=r'D:\huggingface_cache',
373
- local_files_only=True
374
  )
375
  fake_news_pulk_model = AutoModelForSequenceClassification.from_pretrained(
376
  "Pulk17/Fake-News-Detection",
377
- cache_dir=r'D:\huggingface_cache',
378
- local_files_only=True
379
  ).to(device)
380
  fake_news_pulk_model.eval()
381
  print("βœ… Fake News Pulk17 loaded")
@@ -392,13 +404,13 @@ def load_custom_model():
392
  print("πŸ”„ Loading custom trained model...")
393
  custom_tokenizer = AutoTokenizer.from_pretrained(
394
  custom_model_path,
395
- cache_dir=r'D:\huggingface_cache',
396
- local_files_only=True
397
  )
398
  custom_model = AutoModelForSequenceClassification.from_pretrained(
399
  custom_model_path,
400
- cache_dir=r'D:\huggingface_cache',
401
- local_files_only=True
402
  ).to(device)
403
  custom_model.eval()
404
  print("βœ… Custom model loaded")
@@ -557,8 +569,8 @@ def analyze_with_custom_model(text):
557
  if custom_tokenizer is None or custom_model is None:
558
  try:
559
  print(f"Loading custom model from {custom_model_path}...")
560
- custom_tokenizer = AutoTokenizer.from_pretrained(custom_model_path, local_files_only=True)
561
- custom_model = AutoModelForSequenceClassification.from_pretrained(custom_model_path, local_files_only=True).to(device)
562
  print("βœ… Custom model loaded")
563
  except Exception as e:
564
  print(f"⚠️ Custom model not available: {e}")
@@ -2400,3 +2412,5 @@ if __name__ == '__main__':
2400
  print("\nπŸ’‘ Server crashed. Please restart.")
2401
  sys.exit(1)
2402
 
 
 
 
154
 
155
  print("πŸš€ Loading AI models...")
156
 
157
+ # Determine cache directory and loading mode
158
+ import os
159
+ IS_PRODUCTION = os.environ.get('RENDER') or os.environ.get('RAILWAY_ENVIRONMENT')
160
+ if IS_PRODUCTION:
161
+ CACHE_DIR = './models_cache'
162
+ LOCAL_ONLY = False
163
+ print("πŸ“¦ Production mode: Will download models from HuggingFace")
164
+ else:
165
+ CACHE_DIR = r'D:\huggingface_cache'
166
+ LOCAL_ONLY = True
167
+ print("πŸ’» Local mode: Using cached models")
168
+
169
  # RoBERTa for fake news detection
170
  print("Loading RoBERTa fake news detector...")
171
  try:
172
  roberta_tokenizer = AutoTokenizer.from_pretrained(
173
  "hamzab/roberta-fake-news-classification",
174
+ cache_dir=CACHE_DIR,
175
+ local_files_only=LOCAL_ONLY
176
  )
177
  roberta_model = AutoModelForSequenceClassification.from_pretrained(
178
  "hamzab/roberta-fake-news-classification",
179
+ cache_dir=CACHE_DIR,
180
+ local_files_only=LOCAL_ONLY
181
  ).to(device)
182
  roberta_model.eval() # Set to evaluation mode
183
  print("βœ… RoBERTa loaded")
 
189
  print("Loading emotion classifier...")
190
  emotion_tokenizer = AutoTokenizer.from_pretrained(
191
  "j-hartmann/emotion-english-distilroberta-base",
192
+ cache_dir=CACHE_DIR,
193
+ local_files_only=LOCAL_ONLY
194
  )
195
  emotion_model = AutoModelForSequenceClassification.from_pretrained(
196
  "j-hartmann/emotion-english-distilroberta-base",
197
+ cache_dir=CACHE_DIR,
198
+ local_files_only=LOCAL_ONLY
199
  ).to(device)
200
  print("βœ… Emotion model loaded")
201
 
 
291
  print("πŸ”„ Loading NER model...")
292
  ner_tokenizer = AutoTokenizer.from_pretrained(
293
  "dslim/bert-base-NER",
294
+ cache_dir=CACHE_DIR,
295
+ local_files_only=LOCAL_ONLY
296
  )
297
  ner_model = AutoModelForTokenClassification.from_pretrained(
298
  "dslim/bert-base-NER",
299
+ cache_dir=CACHE_DIR,
300
+ local_files_only=LOCAL_ONLY
301
  ).to(device)
302
  print("βœ… NER model loaded")
303
 
 
308
  print("πŸ”„ Loading Hate Speech detector...")
309
  hate_speech_tokenizer = AutoTokenizer.from_pretrained(
310
  "facebook/roberta-hate-speech-dynabench-r4-target",
311
+ cache_dir=CACHE_DIR,
312
+ local_files_only=LOCAL_ONLY
313
  )
314
  hate_speech_model = AutoModelForSequenceClassification.from_pretrained(
315
  "facebook/roberta-hate-speech-dynabench-r4-target",
316
+ cache_dir=CACHE_DIR,
317
+ local_files_only=LOCAL_ONLY
318
  ).to(device)
319
  print("βœ… Hate Speech detector loaded")
320
 
 
325
  print("πŸ”„ Loading Clickbait detector...")
326
  clickbait_tokenizer = AutoTokenizer.from_pretrained(
327
  "elozano/bert-base-cased-clickbait-news",
328
+ cache_dir=CACHE_DIR,
329
+ local_files_only=LOCAL_ONLY
330
  )
331
  clickbait_model = AutoModelForSequenceClassification.from_pretrained(
332
  "elozano/bert-base-cased-clickbait-news",
333
+ cache_dir=CACHE_DIR,
334
+ local_files_only=LOCAL_ONLY
335
  ).to(device)
336
  print("βœ… Clickbait detector loaded")
337
 
 
342
  print("πŸ”„ Loading Bias detector...")
343
  bias_tokenizer = AutoTokenizer.from_pretrained(
344
  "valurank/distilroberta-bias",
345
+ cache_dir=CACHE_DIR,
346
+ local_files_only=LOCAL_ONLY
347
  )
348
  bias_model = AutoModelForSequenceClassification.from_pretrained(
349
  "valurank/distilroberta-bias",
350
+ cache_dir=CACHE_DIR,
351
+ local_files_only=LOCAL_ONLY
352
  ).to(device)
353
  print("βœ… Bias detector loaded")
354
 
 
360
  print("πŸ”„ Loading Fake News BERT #2...")
361
  fake_news_bert_tokenizer = AutoTokenizer.from_pretrained(
362
  "jy46604790/Fake-News-Bert-Detect",
363
+ cache_dir=CACHE_DIR,
364
+ local_files_only=LOCAL_ONLY
365
  )
366
  fake_news_bert_model = AutoModelForSequenceClassification.from_pretrained(
367
  "jy46604790/Fake-News-Bert-Detect",
368
+ cache_dir=CACHE_DIR,
369
+ local_files_only=LOCAL_ONLY
370
  ).to(device)
371
  fake_news_bert_model.eval()
372
  print("βœ… Fake News BERT #2 loaded")
 
381
  print("πŸ”„ Loading Fake News Pulk17...")
382
  fake_news_pulk_tokenizer = AutoTokenizer.from_pretrained(
383
  "Pulk17/Fake-News-Detection",
384
+ cache_dir=CACHE_DIR,
385
+ local_files_only=LOCAL_ONLY
386
  )
387
  fake_news_pulk_model = AutoModelForSequenceClassification.from_pretrained(
388
  "Pulk17/Fake-News-Detection",
389
+ cache_dir=CACHE_DIR,
390
+ local_files_only=LOCAL_ONLY
391
  ).to(device)
392
  fake_news_pulk_model.eval()
393
  print("βœ… Fake News Pulk17 loaded")
 
404
  print("πŸ”„ Loading custom trained model...")
405
  custom_tokenizer = AutoTokenizer.from_pretrained(
406
  custom_model_path,
407
+ cache_dir=CACHE_DIR,
408
+ local_files_only=LOCAL_ONLY
409
  )
410
  custom_model = AutoModelForSequenceClassification.from_pretrained(
411
  custom_model_path,
412
+ cache_dir=CACHE_DIR,
413
+ local_files_only=LOCAL_ONLY
414
  ).to(device)
415
  custom_model.eval()
416
  print("βœ… Custom model loaded")
 
569
  if custom_tokenizer is None or custom_model is None:
570
  try:
571
  print(f"Loading custom model from {custom_model_path}...")
572
+ custom_tokenizer = AutoTokenizer.from_pretrained(custom_model_path, local_files_only=LOCAL_ONLY)
573
+ custom_model = AutoModelForSequenceClassification.from_pretrained(custom_model_path, local_files_only=LOCAL_ONLY).to(device)
574
  print("βœ… Custom model loaded")
575
  except Exception as e:
576
  print(f"⚠️ Custom model not available: {e}")
 
2412
  print("\nπŸ’‘ Server crashed. Please restart.")
2413
  sys.exit(1)
2414
 
2415
+
2416
+
render.yaml CHANGED
@@ -10,3 +10,5 @@ services:
10
  value: 3.11.0
11
  - key: PORT
12
  value: 5000
 
 
 
10
  value: 3.11.0
11
  - key: PORT
12
  value: 5000
13
+ - key: RENDER
14
+ value: true