Add version print for transformers to debug dependency issues
Browse files
Files changed:
- requirements.txt      +1 −1
- train_abuse_model.py  +6 −3
requirements.txt (CHANGED)

@@ -1,5 +1,5 @@
 torch>=2.2.0
-transformers
+transformers==4.40.0
 scikit-learn
 pandas
 numpy<2
train_abuse_model.py (CHANGED)

@@ -6,16 +6,18 @@ import os
 import pandas as pd
 import numpy as np
 import torch
+from torch.utils.data import Dataset
+
+print("PyTorch version:", torch.__version__)
 
 from sklearn.model_selection import train_test_split
 from sklearn.metrics import classification_report, precision_recall_fscore_support
 
-from torch.utils.data import Dataset
-
 # Hugging Face Hub
 from huggingface_hub import hf_hub_download
 
 # Hugging Face transformers
+import transformers
 from transformers import (
     AutoTokenizer,
     DebertaV2Tokenizer,
@@ -25,7 +27,8 @@ from transformers import (
     Trainer,
     TrainingArguments
 )
-
+# Check versions
+print("Transformers version:", transformers.__version__)
 
 # Check for GPU availability
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")