import os

from huggingface_hub import login

from easyllm.clients import huggingface
from easyllm.prompt_utils import build_llama2_prompt

# Hugging Face access token, read from the environment (may be None if unset).
TOKEN = os.environ.get("TOKEN")
login(token=TOKEN)

# Have easyllm render chat messages with the Llama-2 prompt template.
huggingface.prompt_builder = build_llama2_prompt

# System message prepended to every chat completion request.
system_message = """
You are a metadata schema translator. You translate metadata from one schema to another.
"""
def translate(inputs):
    """Translate a metadata file from one schema to another via Llama-2 chat.

    Parameters
    ----------
    inputs : tuple[str, str]
        ``(schema_input, schema_target)`` — the metadata text to translate
        and the name of the target schema.

    Returns
    -------
    str
        The model's translated metadata text (content of the first choice).
    """
    schema_input, schema_target = inputs
    # BUG FIX: the original assignment ended with a trailing comma, which made
    # the variable a 1-tuple instead of a string, so a tuple was sent as the
    # user message content. Also fixes the `propmpt` typo.
    prompt = f"{schema_input} \n Translate the schema metadata file above to the schema: {schema_target}"
    response = huggingface.ChatCompletion.create(
        model="meta-llama/Llama-2-70b-chat-hf",
        messages=[
            {"role": "system", "content": system_message},
            {"role": "user", "content": prompt},
        ],
        # High temperature with moderate top_p: some creative freedom while
        # keeping the token distribution constrained.
        temperature=0.9,
        top_p=0.6,
        max_tokens=256,
    )
    return response["choices"][0]["message"]["content"]