pipeline
metadata_transformer.py +11 -9
metadata_transformer.py CHANGED
```diff
@@ -8,17 +8,19 @@ TOKEN = os.environ.get("TOKEN")
 
 login(token=TOKEN)
 
-
+def translate(schema_input, schema_target):
 
-tokenizer = AutoTokenizer.from_pretrained(model)
-pipeline = transformers.pipeline(
-    "text-generation",
-    model=model,
-    torch_dtype=torch.float16,
-    device_map="auto",
-)
 
-
+    model = "meta-llama/Llama-2-7b-chat-hf"
+
+    tokenizer = AutoTokenizer.from_pretrained(model)
+    pipeline = transformers.pipeline(
+        "text-generation",
+        model=model,
+        torch_dtype=torch.float16,
+        device_map="auto",
+    )
+
 sequences = pipeline(
     '{} \n Translate the schema metadata file above to the schema: {}'.format(schema_input, schema_target),
     do_sample=True,
```
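For orientation, below is a minimal sketch of what `metadata_transformer.py` plausibly looks like after this commit. The import block, everything after `do_sample=True,` in the generation call, and the `return` value are assumptions, since the hunk never shows them. Note also that the unchanged context line `sequences = pipeline(` is still at module level in the diff, while `pipeline` is now local to `translate`; a working version presumably indents that call into the function body, as done here.

```python
# Hypothetical reconstruction of metadata_transformer.py after this commit.
# The imports and every argument after do_sample=True are assumptions;
# only the lines shown in the diff hunk are taken from the source.
import os

import torch
import transformers
from huggingface_hub import login
from transformers import AutoTokenizer

TOKEN = os.environ.get("TOKEN")

login(token=TOKEN)

def translate(schema_input, schema_target):

    model = "meta-llama/Llama-2-7b-chat-hf"

    tokenizer = AutoTokenizer.from_pretrained(model)
    pipeline = transformers.pipeline(
        "text-generation",
        model=model,
        torch_dtype=torch.float16,  # half precision to fit the 7B model in GPU memory
        device_map="auto",          # let accelerate place layers across available devices
    )

    # Moved into the function body here; the diff still shows this call
    # unindented at module level, which would fail after this commit.
    sequences = pipeline(
        '{} \n Translate the schema metadata file above to the schema: {}'.format(schema_input, schema_target),
        do_sample=True,
        # Assumed sampling/termination settings; the hunk is cut off after do_sample=True.
        top_k=10,
        num_return_sequences=1,
        eos_token_id=tokenizer.eos_token_id,
        max_length=2048,
    )
    return sequences[0]["generated_text"]
```

A caller would then invoke something like `translate(raw_metadata_text, "target-schema-name")`, though neither the calling convention nor the target-schema naming appears in this hunk.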