diff --git a/logs/l40-1gpu-2.txt b/logs/l40-1gpu-2.txt
index df006886e0f3f8dda84f1475ac28fa851d945f5c..65b01f10bb65e2ea97406e48029530b3fdd2aa8c 100644
--- a/logs/l40-1gpu-2.txt
+++ b/logs/l40-1gpu-2.txt
@@ -644,4 +644,4 @@ Chinese: 老耿端起枪,眯缝起一只三角眼,一搂扳机响了枪,
 English:<|end|>
 <|assistant|>
-  0%|          | 0/567 [00:00
+You are a helpful assistant that translates Chinese to English.<|end|>
+<|user|>
+You will be given a Chinese sentence to translate. If it is an incomplete sentence, or if you are unsure about the meaning, simply copy the input text as your output. Do not output any additional sentence such as explanation or reasoning.
+
+Chinese: 老耿端起枪,眯缝起一只三角眼,一搂扳机响了枪,冰雹般的金麻雀劈哩啪啦往下落,铁砂子在柳枝间飞迸着,嚓嚓有声。
+English:<|end|>
+<|assistant|>
+Old Geng picked up his shotgun, squinted, and pulled the trigger. Two sparrows crashed to the ground like hailstones as shotgun pellets tore noisily through the branches.<|endoftext|>
+--------------------------------------------------
+prompt: <|system|>
+You are a helpful assistant that translates Chinese to English.<|end|>
+<|user|>
+You will be given a Chinese sentence to translate. If it is an incomplete sentence, or if you are unsure about the meaning, simply copy the input text as your output. Do not output any additional sentence such as explanation or reasoning.
+
+Chinese: 老耿端起枪,眯缝起一只三角眼,一搂扳机响了枪,冰雹般的金麻雀劈哩啪啦往下落,铁砂子在柳枝间飞迸着,嚓嚓有声。
+English:<|end|>
+<|assistant|>
+
+(1) GPU = NVIDIA L40. Max memory = 44.309 GB.
+7.119 GB of memory reserved.
+found 6 checkpoints: ['checkpoint-35', 'checkpoint-70', 'checkpoint-105', 'checkpoint-140', 'checkpoint-175', 'checkpoint-210']
+Running from epoch 1 to 6
+Epoch 1
+loading adapter: llama-factory/saves/Phi-3.5-mini-instruct/checkpoint-35
+  0%|          | 0/1133 [00:00
+    model, tokenizer = load_model(
+                       ^^^^^^^^^^^
+  File "/common/home/users/d/dh.huang.2023/common2/code/rapget-translation/llm_toolkit/llm_utils.py", line 99, in load_model
+    adapter_name = model.load_adapter(adapter_name_or_path)
+                   ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+  File "/common/home/users/d/dh.huang.2023/.conda/envs/llm-perf-bench/lib/python3.11/site-packages/transformers/integrations/peft.py", line 176, in load_adapter
+    adapter_config_file = find_adapter_config_file(
+                          ^^^^^^^^^^^^^^^^^^^^^^^^^
+  File "/common/home/users/d/dh.huang.2023/.conda/envs/llm-perf-bench/lib/python3.11/site-packages/transformers/utils/peft_utils.py", line 88, in find_adapter_config_file
+    adapter_cached_filename = cached_file(
+                              ^^^^^^^^^^^^
+  File "/common/home/users/d/dh.huang.2023/.conda/envs/llm-perf-bench/lib/python3.11/site-packages/transformers/utils/hub.py", line 466, in cached_file
+    raise EnvironmentError(
+OSError: Incorrect path_or_model_id: 'llama-factory/saves/Phi-3.5-mini-instruct/best_checkpoint'. Please provide either the path to a local folder or the repo_id of a model on the Hub.
+srun: error: lexicon: task 0: Exited with exit code 1
+srun: Terminating StepId=80154.0
+Job ID: 80154
+Cluster: crimson
+User/Group: dh.huang.2023/dh.huang.2023
+State: FAILED (exit code 1)
+Nodes: 1
+Cores per node: 10
+CPU Utilized: 03:11:34
+CPU Efficiency: 9.88% of 1-08:19:50 core-walltime
+Job Wall-clock time: 03:13:59
+Memory Utilized: 4.18 GB
+Memory Efficiency: 1.63% of 256.00 GB
diff --git a/results/mac-results_fine_tuned.csv b/results/mac-results_fine_tuned.csv
index 587d96bab9ad4a40f83c683ef364458be8b398fe..a8dcc83b3ddf6eeb2797e84f6ae95d78c1d0938f 100644
--- a/results/mac-results_fine_tuned.csv
+++ b/results/mac-results_fine_tuned.csv
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ff8b41d8c7f1628866eb347ca3fdad9b299881bc353bcc5cc79c54dca32228f1
-size 10563592
+oid sha256:98d9456d11ed997c00e0ae3d11c4da9f940f690ea6229c515799173b0e483bfa
+size 11370121
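
Note on the failure in the first hunk: the OSError is raised because load_model passes 'llama-factory/saves/Phi-3.5-mini-instruct/best_checkpoint' to model.load_adapter, and since no such local directory exists, transformers tries to resolve it as a Hub repo id and gives up. The snippet below is a minimal sketch of one way to guard against that; resolve_adapter_path is a hypothetical helper, not part of llm_utils.py, and it assumes the checkpoint-N directories listed earlier in the log sit next to the missing best_checkpoint directory.

    import os
    import re

    def resolve_adapter_path(adapter_dir: str) -> str:
        # If the requested directory already holds an adapter_config.json, use it as-is.
        if os.path.isfile(os.path.join(adapter_dir, "adapter_config.json")):
            return adapter_dir
        # Otherwise fall back to the newest checkpoint-N directory next to it,
        # mirroring the "found 6 checkpoints" listing in the log above.
        parent = os.path.dirname(adapter_dir.rstrip("/")) or "."
        candidates = [
            d for d in os.listdir(parent)
            if re.fullmatch(r"checkpoint-\d+", d)
            and os.path.isfile(os.path.join(parent, d, "adapter_config.json"))
        ]
        if not candidates:
            raise FileNotFoundError(f"no adapter checkpoints with adapter_config.json under {parent}")
        latest = max(candidates, key=lambda d: int(d.split("-")[1]))
        return os.path.join(parent, latest)

    # Usage sketch: resolve the path before handing it to transformers' PEFT integration.
    # adapter_path = resolve_adapter_path("llama-factory/saves/Phi-3.5-mini-instruct/best_checkpoint")
    # adapter_name = model.load_adapter(adapter_path)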