Upload folder using huggingface_hub
- runs/text_generation/meta-llama/Llama-3.1-8B-Instruct/2024-10-23-20-29-54/.hydra/config.yaml +96 -0
- runs/text_generation/meta-llama/Llama-3.1-8B-Instruct/2024-10-23-20-29-54/.hydra/hydra.yaml +175 -0
- runs/text_generation/meta-llama/Llama-3.1-8B-Instruct/2024-10-23-20-29-54/.hydra/overrides.yaml +2 -0
- runs/text_generation/meta-llama/Llama-3.1-8B-Instruct/2024-10-23-20-29-54/cli.log +17 -0
- runs/text_generation/meta-llama/Llama-3.1-8B-Instruct/2024-10-23-20-29-54/error.log +50 -0
- runs/text_generation/meta-llama/Llama-3.1-8B-Instruct/2024-10-23-20-29-54/experiment_config.json +110 -0
- runs/text_generation/meta-llama/Llama-3.1-8B-Instruct/2024-10-23-20-30-04/.hydra/config.yaml +96 -0
- runs/text_generation/meta-llama/Llama-3.1-8B-Instruct/2024-10-23-20-30-04/.hydra/hydra.yaml +175 -0
- runs/text_generation/meta-llama/Llama-3.1-8B-Instruct/2024-10-23-20-30-04/.hydra/overrides.yaml +2 -0
- runs/text_generation/meta-llama/Llama-3.1-8B-Instruct/2024-10-23-20-30-04/cli.log +17 -0
- runs/text_generation/meta-llama/Llama-3.1-8B-Instruct/2024-10-23-20-30-04/error.log +50 -0
- runs/text_generation/meta-llama/Llama-3.1-8B-Instruct/2024-10-23-20-30-04/experiment_config.json +110 -0
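
These artifacts were presumably pushed with huggingface_hub's `upload_folder` API, per the commit message above. A minimal sketch of such a call (the `repo_id` is a placeholder; the target repository is not visible in this diff):

    from huggingface_hub import HfApi

    api = HfApi()  # uses the token from `huggingface-cli login` by default
    api.upload_folder(
        folder_path="runs",             # local folder holding the benchmark outputs
        path_in_repo="runs",            # mirror the same layout in the repo
        repo_id="<org>/<repo>",         # placeholder: not shown in this diff
        repo_type="dataset",            # assumption: run artifacts are often kept in a dataset repo
        commit_message="Upload folder using huggingface_hub",
    )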
    	
runs/text_generation/meta-llama/Llama-3.1-8B-Instruct/2024-10-23-20-29-54/.hydra/config.yaml ADDED
@@ -0,0 +1,96 @@
+backend:
+  name: pytorch
+  version: 2.4.0
+  _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend
+  task: text-generation
+  model: meta-llama/Llama-3.1-8B-Instruct
+  processor: meta-llama/Llama-3.1-8B-Instruct
+  library: null
+  device: cuda
+  device_ids: '0'
+  seed: 42
+  inter_op_num_threads: null
+  intra_op_num_threads: null
+  hub_kwargs: {}
+  no_weights: true
+  device_map: null
+  torch_dtype: null
+  amp_autocast: false
+  amp_dtype: null
+  eval_mode: true
+  to_bettertransformer: false
+  low_cpu_mem_usage: null
+  attn_implementation: null
+  cache_implementation: null
+  torch_compile: false
+  torch_compile_config: {}
+  quantization_scheme: null
+  quantization_config: {}
+  deepspeed_inference: false
+  deepspeed_inference_config: {}
+  peft_type: null
+  peft_config: {}
+launcher:
+  name: process
+  _target_: optimum_benchmark.launchers.process.launcher.ProcessLauncher
+  device_isolation: false
+  device_isolation_action: warn
+  start_method: spawn
+benchmark:
+  name: energy_star
+  _target_: optimum_benchmark.benchmarks.energy_star.benchmark.EnergyStarBenchmark
+  dataset_name: EnergyStarAI/text_generation
+  dataset_config: ''
+  dataset_split: train
+  num_samples: 1000
+  input_shapes:
+    batch_size: 1
+  text_column_name: text
+  truncation: true
+  max_length: -1
+  dataset_prefix1: ''
+  dataset_prefix2: ''
+  t5_task: ''
+  image_column_name: image
+  resize: false
+  question_column_name: question
+  context_column_name: context
+  sentence1_column_name: sentence1
+  sentence2_column_name: sentence2
+  audio_column_name: audio
+  iterations: 10
+  warmup_runs: 10
+  energy: true
+  forward_kwargs: {}
+  generate_kwargs:
+    max_new_tokens: 10
+    min_new_tokens: 10
+  call_kwargs: {}
+experiment_name: text_generation
+environment:
+  cpu: ' AMD EPYC 7R32'
+  cpu_count: 48
+  cpu_ram_mb: 200472.73984
+  system: Linux
+  machine: x86_64
+  platform: Linux-5.10.192-183.736.amzn2.x86_64-x86_64-with-glibc2.35
+  processor: x86_64
+  python_version: 3.9.20
+  gpu:
+  - NVIDIA A10G
+  gpu_count: 1
+  gpu_vram_mb: 24146608128
+  optimum_benchmark_version: 0.2.0
+  optimum_benchmark_commit: null
+  transformers_version: 4.44.0
+  transformers_commit: null
+  accelerate_version: 0.33.0
+  accelerate_commit: null
+  diffusers_version: 0.30.0
+  diffusers_commit: null
+  optimum_version: null
+  optimum_commit: null
+  timm_version: null
+  timm_commit: null
+  peft_version: null
+  peft_commit: null
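
The `.hydra/config.yaml` above is the fully composed experiment config that Hydra snapshots into the run directory. A small sketch for loading it back and inspecting values (OmegaConf ships with Hydra; the path is this run's):

    from omegaconf import OmegaConf

    cfg = OmegaConf.load(
        "runs/text_generation/meta-llama/Llama-3.1-8B-Instruct/"
        "2024-10-23-20-29-54/.hydra/config.yaml"
    )
    print(cfg.backend.model)              # meta-llama/Llama-3.1-8B-Instruct
    print(cfg.benchmark.generate_kwargs)  # {'max_new_tokens': 10, 'min_new_tokens': 10}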
    	
runs/text_generation/meta-llama/Llama-3.1-8B-Instruct/2024-10-23-20-29-54/.hydra/hydra.yaml ADDED
@@ -0,0 +1,175 @@
+hydra:
+  run:
+    dir: ./runs/text_generation/meta-llama/Llama-3.1-8B-Instruct/2024-10-23-20-29-54
+  sweep:
+    dir: sweeps/${experiment_name}/${backend.model}/${now:%Y-%m-%d-%H-%M-%S}
+    subdir: ${hydra.job.num}
+  launcher:
+    _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
+  sweeper:
+    _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
+    max_batch_size: null
+    params: null
+  help:
+    app_name: ${hydra.job.name}
+    header: '${hydra.help.app_name} is powered by Hydra.
+
+      '
+    footer: 'Powered by Hydra (https://hydra.cc)
+
+      Use --hydra-help to view Hydra specific help
+
+      '
+    template: '${hydra.help.header}
+
+      == Configuration groups ==
+
+      Compose your configuration from those groups (group=option)
+
+
+      $APP_CONFIG_GROUPS
+
+
+      == Config ==
+
+      Override anything in the config (foo.bar=value)
+
+
+      $CONFIG
+
+
+      ${hydra.help.footer}
+
+      '
+  hydra_help:
+    template: 'Hydra (${hydra.runtime.version})
+
+      See https://hydra.cc for more info.
+
+
+      == Flags ==
+
+      $FLAGS_HELP
+
+
+      == Configuration groups ==
+
+      Compose your configuration from those groups (For example, append hydra/job_logging=disabled
+      to command line)
+
+
+      $HYDRA_CONFIG_GROUPS
+
+
+      Use ''--cfg hydra'' to Show the Hydra config.
+
+      '
+    hydra_help: ???
+  hydra_logging:
+    version: 1
+    formatters:
+      colorlog:
+        (): colorlog.ColoredFormatter
+        format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s'
+    handlers:
+      console:
+        class: logging.StreamHandler
+        formatter: colorlog
+        stream: ext://sys.stdout
+    root:
+      level: INFO
+      handlers:
+      - console
+    disable_existing_loggers: false
+  job_logging:
+    version: 1
+    formatters:
+      simple:
+        format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
+      colorlog:
+        (): colorlog.ColoredFormatter
+        format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s]
+          - %(message)s'
+        log_colors:
+          DEBUG: purple
+          INFO: green
+          WARNING: yellow
+          ERROR: red
+          CRITICAL: red
+    handlers:
+      console:
+        class: logging.StreamHandler
+        formatter: colorlog
+        stream: ext://sys.stdout
+      file:
+        class: logging.FileHandler
+        formatter: simple
+        filename: ${hydra.job.name}.log
+    root:
+      level: INFO
+      handlers:
+      - console
+      - file
+    disable_existing_loggers: false
+  env: {}
+  mode: RUN
+  searchpath: []
+  callbacks: {}
+  output_subdir: .hydra
+  overrides:
+    hydra:
+    - hydra.run.dir=./runs/text_generation/meta-llama/Llama-3.1-8B-Instruct/2024-10-23-20-29-54
+    - hydra.mode=RUN
+    task:
+    - backend.model=meta-llama/Llama-3.1-8B-Instruct
+    - backend.processor=meta-llama/Llama-3.1-8B-Instruct
+  job:
+    name: cli
+    chdir: true
+    override_dirname: backend.model=meta-llama/Llama-3.1-8B-Instruct,backend.processor=meta-llama/Llama-3.1-8B-Instruct
+    id: ???
+    num: ???
+    config_name: text_generation
+    env_set:
+      OVERRIDE_BENCHMARKS: '1'
+    env_copy: []
+    config:
+      override_dirname:
+        kv_sep: '='
+        item_sep: ','
+        exclude_keys: []
+  runtime:
+    version: 1.3.2
+    version_base: '1.3'
+    cwd: /
+    config_sources:
+    - path: hydra.conf
+      schema: pkg
+      provider: hydra
+    - path: optimum_benchmark
+      schema: pkg
+      provider: main
+    - path: hydra_plugins.hydra_colorlog.conf
+      schema: pkg
+      provider: hydra-colorlog
+    - path: /optimum-benchmark/examples/energy_star
+      schema: file
+      provider: command-line
+    - path: ''
+      schema: structured
+      provider: schema
+    output_dir: /runs/text_generation/meta-llama/Llama-3.1-8B-Instruct/2024-10-23-20-29-54
+    choices:
+      benchmark: energy_star
+      launcher: process
+      backend: pytorch
+      hydra/env: default
+      hydra/callbacks: null
+      hydra/job_logging: colorlog
+      hydra/hydra_logging: colorlog
+      hydra/hydra_help: default
+      hydra/help: default
+      hydra/sweeper: basic
+      hydra/launcher: basic
+      hydra/output: default
+  verbose: false
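
From `job.config_name`, the command-line config source (`/optimum-benchmark/examples/energy_star`), and the recorded task overrides, the run was presumably launched with a command along these lines (a reconstruction, not copied from the logs; `--config-dir` and `--config-name` are standard Hydra flags):

    optimum-benchmark --config-dir /optimum-benchmark/examples/energy_star \
        --config-name text_generation \
        backend.model=meta-llama/Llama-3.1-8B-Instruct \
        backend.processor=meta-llama/Llama-3.1-8B-Instruct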
    	
runs/text_generation/meta-llama/Llama-3.1-8B-Instruct/2024-10-23-20-29-54/.hydra/overrides.yaml ADDED
@@ -0,0 +1,2 @@
+- backend.model=meta-llama/Llama-3.1-8B-Instruct
+- backend.processor=meta-llama/Llama-3.1-8B-Instruct
    	
runs/text_generation/meta-llama/Llama-3.1-8B-Instruct/2024-10-23-20-29-54/cli.log ADDED
@@ -0,0 +1,17 @@
+[2024-10-23 20:29:57,163][launcher][INFO] - Allocating process launcher
+[2024-10-23 20:29:57,163][process][INFO] - 	+ Setting multiprocessing start method to spawn.
+[2024-10-23 20:29:57,173][process][INFO] - 	+ Launched benchmark in isolated process 200.
+[PROC-0][2024-10-23 20:29:59,693][datasets][INFO] - PyTorch version 2.4.0 available.
+[PROC-0][2024-10-23 20:30:00,603][backend][INFO] - Allocating pytorch backend
+[PROC-0][2024-10-23 20:30:00,603][backend][INFO] - 	+ Setting random seed to 42
+[PROC-0][2024-10-23 20:30:01,832][pytorch][INFO] - 	+ Using AutoModel class AutoModelForCausalLM
+[PROC-0][2024-10-23 20:30:01,832][pytorch][INFO] - 	+ Creating backend temporary directory
+[PROC-0][2024-10-23 20:30:01,832][pytorch][INFO] - 	+ Loading model with random weights
+[PROC-0][2024-10-23 20:30:01,832][pytorch][INFO] - 	+ Creating no weights model
+[PROC-0][2024-10-23 20:30:01,833][pytorch][INFO] - 	+ Creating no weights model directory
+[PROC-0][2024-10-23 20:30:01,833][pytorch][INFO] - 	+ Creating no weights model state dict
+[PROC-0][2024-10-23 20:30:01,852][pytorch][INFO] - 	+ Saving no weights model safetensors
+[PROC-0][2024-10-23 20:30:01,853][pytorch][INFO] - 	+ Saving no weights model pretrained config
+[PROC-0][2024-10-23 20:30:01,853][pytorch][INFO] - 	+ Loading no weights AutoModel
+[PROC-0][2024-10-23 20:30:01,854][pytorch][INFO] - 	+ Loading model directly on device: cuda
+[2024-10-23 20:30:02,845][experiment][ERROR] - Error during experiment
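
The "no weights" steps in this log avoid downloading the 8B checkpoint: the backend fabricates a randomly initialized state dict, saves it as safetensors, and loads it through the usual `from_pretrained` path directly on `cuda`. A rough sketch of the same idea with plain transformers/accelerate (the general technique, not optimum-benchmark's exact implementation; fetching the config assumes access to the gated repo):

    from accelerate import init_empty_weights
    from transformers import AutoConfig, AutoModelForCausalLM

    config = AutoConfig.from_pretrained("meta-llama/Llama-3.1-8B-Instruct")
    with init_empty_weights():
        # builds the architecture on the meta device: no download, no weight memory
        model = AutoModelForCausalLM.from_config(config)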
    	
runs/text_generation/meta-llama/Llama-3.1-8B-Instruct/2024-10-23-20-29-54/error.log ADDED
@@ -0,0 +1,50 @@
+Error executing job with overrides: ['backend.model=meta-llama/Llama-3.1-8B-Instruct', 'backend.processor=meta-llama/Llama-3.1-8B-Instruct']
+Traceback (most recent call last):
+  File "/optimum-benchmark/optimum_benchmark/cli.py", line 65, in benchmark_cli
+    benchmark_report: BenchmarkReport = launch(experiment_config=experiment_config)
+  File "/optimum-benchmark/optimum_benchmark/experiment.py", line 102, in launch
+    raise error
+  File "/optimum-benchmark/optimum_benchmark/experiment.py", line 90, in launch
+    report = launcher.launch(run, experiment_config.benchmark, experiment_config.backend)
+  File "/optimum-benchmark/optimum_benchmark/launchers/process/launcher.py", line 47, in launch
+    while not process_context.join():
+  File "/opt/conda/lib/python3.9/site-packages/torch/multiprocessing/spawn.py", line 189, in join
+    raise ProcessRaisedException(msg, error_index, failed_process.pid)
+torch.multiprocessing.spawn.ProcessRaisedException: 
+
+-- Process 0 terminated with the following error:
+Traceback (most recent call last):
+  File "/opt/conda/lib/python3.9/site-packages/torch/multiprocessing/spawn.py", line 76, in _wrap
+    fn(i, *args)
+  File "/optimum-benchmark/optimum_benchmark/launchers/process/launcher.py", line 63, in entrypoint
+    worker_output = worker(*worker_args)
+  File "/optimum-benchmark/optimum_benchmark/experiment.py", line 55, in run
+    backend: Backend = backend_factory(backend_config)
+  File "/optimum-benchmark/optimum_benchmark/backends/pytorch/backend.py", line 81, in __init__
+    self.load_model_with_no_weights()
+  File "/optimum-benchmark/optimum_benchmark/backends/pytorch/backend.py", line 246, in load_model_with_no_weights
+    self.load_model_from_pretrained()
+  File "/optimum-benchmark/optimum_benchmark/backends/pytorch/backend.py", line 204, in load_model_from_pretrained
+    self.pretrained_model = self.automodel_class.from_pretrained(
+  File "/opt/conda/lib/python3.9/site-packages/transformers/models/auto/auto_factory.py", line 564, in from_pretrained
+    return model_class.from_pretrained(
+  File "/opt/conda/lib/python3.9/site-packages/transformers/modeling_utils.py", line 3810, in from_pretrained
+    model = cls(config, *model_args, **model_kwargs)
+  File "/opt/conda/lib/python3.9/site-packages/transformers/models/llama/modeling_llama.py", line 1116, in __init__
+    self.model = LlamaModel(config)
+  File "/opt/conda/lib/python3.9/site-packages/transformers/models/llama/modeling_llama.py", line 902, in __init__
+    [LlamaDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
+  File "/opt/conda/lib/python3.9/site-packages/transformers/models/llama/modeling_llama.py", line 902, in <listcomp>
+    [LlamaDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
+  File "/opt/conda/lib/python3.9/site-packages/transformers/models/llama/modeling_llama.py", line 691, in __init__
+    self.mlp = LlamaMLP(config)
+  File "/opt/conda/lib/python3.9/site-packages/transformers/models/llama/modeling_llama.py", line 287, in __init__
+    self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias)
+  File "/opt/conda/lib/python3.9/site-packages/torch/nn/modules/linear.py", line 99, in __init__
+    self.weight = Parameter(torch.empty((out_features, in_features), **factory_kwargs))
+  File "/opt/conda/lib/python3.9/site-packages/torch/utils/_device.py", line 79, in __torch_function__
+    return func(*args, **kwargs)
+torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 224.00 MiB. GPU 0 has a total capacity of 22.19 GiB of which 69.50 MiB is free. Process 28476 has 22.12 GiB memory in use. Of the allocated memory 21.83 GiB is allocated by PyTorch, and 1.24 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation.  See documentation for Memory Management  (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
+
+
+Set the environment variable HYDRA_FULL_ERROR=1 for a complete stack trace.
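
The failure is consistent with the config: `torch_dtype: null` means the 8B model is materialized in float32, roughly 8e9 parameters x 4 bytes, about 32 GB of weights against the A10G's 22.19 GiB, and the message shows 21.83 GiB already allocated by PyTorch, i.e. the card filled up partway through building the layers. Besides the allocator setting the error message itself suggests, halving the weights (~16 GB) should let the load fit; in this setup that would plausibly be the Hydra override `backend.torch_dtype=float16` (assumption: the backend accepts dtype names as strings). A minimal sketch of the suggested allocator setting, applied before CUDA is initialized:

    import os

    # Quoted from the error message: mitigates fragmentation when
    # reserved-but-unallocated memory is large. Must be set before
    # the first CUDA allocation in the process.
    os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"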
    	
runs/text_generation/meta-llama/Llama-3.1-8B-Instruct/2024-10-23-20-29-54/experiment_config.json ADDED
@@ -0,0 +1,110 @@
+{
+    "experiment_name": "text_generation",
+    "backend": {
+        "name": "pytorch",
+        "version": "2.4.0",
+        "_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
+        "task": "text-generation",
+        "model": "meta-llama/Llama-3.1-8B-Instruct",
+        "processor": "meta-llama/Llama-3.1-8B-Instruct",
+        "library": "transformers",
+        "device": "cuda",
+        "device_ids": "0",
+        "seed": 42,
+        "inter_op_num_threads": null,
+        "intra_op_num_threads": null,
+        "hub_kwargs": {
+            "revision": "main",
+            "force_download": false,
+            "local_files_only": false,
+            "trust_remote_code": true
+        },
+        "no_weights": true,
+        "device_map": null,
+        "torch_dtype": null,
+        "amp_autocast": false,
+        "amp_dtype": null,
+        "eval_mode": true,
+        "to_bettertransformer": false,
+        "low_cpu_mem_usage": null,
+        "attn_implementation": null,
+        "cache_implementation": null,
+        "torch_compile": false,
+        "torch_compile_config": {},
+        "quantization_scheme": null,
+        "quantization_config": {},
+        "deepspeed_inference": false,
+        "deepspeed_inference_config": {},
+        "peft_type": null,
+        "peft_config": {}
+    },
+    "launcher": {
+        "name": "process",
+        "_target_": "optimum_benchmark.launchers.process.launcher.ProcessLauncher",
+        "device_isolation": false,
+        "device_isolation_action": "warn",
+        "start_method": "spawn"
+    },
+    "benchmark": {
+        "name": "energy_star",
+        "_target_": "optimum_benchmark.benchmarks.energy_star.benchmark.EnergyStarBenchmark",
+        "dataset_name": "EnergyStarAI/text_generation",
+        "dataset_config": "",
+        "dataset_split": "train",
+        "num_samples": 1000,
+        "input_shapes": {
+            "batch_size": 1
+        },
+        "text_column_name": "text",
+        "truncation": true,
+        "max_length": -1,
+        "dataset_prefix1": "",
+        "dataset_prefix2": "",
+        "t5_task": "",
+        "image_column_name": "image",
+        "resize": false,
+        "question_column_name": "question",
+        "context_column_name": "context",
+        "sentence1_column_name": "sentence1",
+        "sentence2_column_name": "sentence2",
+        "audio_column_name": "audio",
+        "iterations": 10,
+        "warmup_runs": 10,
+        "energy": true,
+        "forward_kwargs": {},
+        "generate_kwargs": {
+            "max_new_tokens": 10,
+            "min_new_tokens": 10
+        },
+        "call_kwargs": {}
+    },
+    "environment": {
+        "cpu": " AMD EPYC 7R32",
+        "cpu_count": 48,
+        "cpu_ram_mb": 200472.73984,
+        "system": "Linux",
+        "machine": "x86_64",
+        "platform": "Linux-5.10.192-183.736.amzn2.x86_64-x86_64-with-glibc2.35",
+        "processor": "x86_64",
+        "python_version": "3.9.20",
+        "gpu": [
+            "NVIDIA A10G"
+        ],
+        "gpu_count": 1,
+        "gpu_vram_mb": 24146608128,
+        "optimum_benchmark_version": "0.2.0",
+        "optimum_benchmark_commit": null,
+        "transformers_version": "4.44.0",
+        "transformers_commit": null,
+        "accelerate_version": "0.33.0",
+        "accelerate_commit": null,
+        "diffusers_version": "0.30.0",
+        "diffusers_commit": null,
+        "optimum_version": null,
+        "optimum_commit": null,
+        "timm_version": null,
+        "timm_commit": null,
+        "peft_version": null,
+        "peft_commit": null
+    }
+}
    	
runs/text_generation/meta-llama/Llama-3.1-8B-Instruct/2024-10-23-20-30-04/.hydra/config.yaml ADDED
@@ -0,0 +1,96 @@
+backend:
+  name: pytorch
+  version: 2.4.0
+  _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend
+  task: text-generation
+  model: meta-llama/Llama-3.1-8B-Instruct
+  processor: meta-llama/Llama-3.1-8B-Instruct
+  library: null
+  device: cuda
+  device_ids: '0'
+  seed: 42
+  inter_op_num_threads: null
+  intra_op_num_threads: null
+  hub_kwargs: {}
+  no_weights: true
+  device_map: null
+  torch_dtype: null
+  amp_autocast: false
+  amp_dtype: null
+  eval_mode: true
+  to_bettertransformer: false
+  low_cpu_mem_usage: null
+  attn_implementation: null
+  cache_implementation: null
+  torch_compile: false
+  torch_compile_config: {}
+  quantization_scheme: null
+  quantization_config: {}
+  deepspeed_inference: false
+  deepspeed_inference_config: {}
+  peft_type: null
+  peft_config: {}
+launcher:
+  name: process
+  _target_: optimum_benchmark.launchers.process.launcher.ProcessLauncher
+  device_isolation: false
+  device_isolation_action: warn
+  start_method: spawn
+benchmark:
+  name: energy_star
+  _target_: optimum_benchmark.benchmarks.energy_star.benchmark.EnergyStarBenchmark
+  dataset_name: EnergyStarAI/text_generation
+  dataset_config: ''
+  dataset_split: train
+  num_samples: 1000
+  input_shapes:
+    batch_size: 1
+  text_column_name: text
+  truncation: true
+  max_length: -1
+  dataset_prefix1: ''
+  dataset_prefix2: ''
+  t5_task: ''
+  image_column_name: image
+  resize: false
+  question_column_name: question
+  context_column_name: context
+  sentence1_column_name: sentence1
+  sentence2_column_name: sentence2
+  audio_column_name: audio
+  iterations: 10
+  warmup_runs: 10
+  energy: true
+  forward_kwargs: {}
+  generate_kwargs:
+    max_new_tokens: 10
+    min_new_tokens: 10
+  call_kwargs: {}
+experiment_name: text_generation
+environment:
+  cpu: ' AMD EPYC 7R32'
+  cpu_count: 48
+  cpu_ram_mb: 200472.73984
+  system: Linux
+  machine: x86_64
+  platform: Linux-5.10.192-183.736.amzn2.x86_64-x86_64-with-glibc2.35
+  processor: x86_64
+  python_version: 3.9.20
+  gpu:
+  - NVIDIA A10G
+  gpu_count: 1
+  gpu_vram_mb: 24146608128
+  optimum_benchmark_version: 0.2.0
+  optimum_benchmark_commit: null
+  transformers_version: 4.44.0
+  transformers_commit: null
+  accelerate_version: 0.33.0
+  accelerate_commit: null
+  diffusers_version: 0.30.0
+  diffusers_commit: null
+  optimum_version: null
+  optimum_commit: null
+  timm_version: null
+  timm_commit: null
+  peft_version: null
+  peft_commit: null
    	
runs/text_generation/meta-llama/Llama-3.1-8B-Instruct/2024-10-23-20-30-04/.hydra/hydra.yaml ADDED
@@ -0,0 +1,175 @@
+hydra:
+  run:
+    dir: ./runs/text_generation/meta-llama/Llama-3.1-8B-Instruct/2024-10-23-20-30-04
+  sweep:
+    dir: sweeps/${experiment_name}/${backend.model}/${now:%Y-%m-%d-%H-%M-%S}
+    subdir: ${hydra.job.num}
+  launcher:
+    _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
+  sweeper:
+    _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
+    max_batch_size: null
+    params: null
+  help:
+    app_name: ${hydra.job.name}
+    header: '${hydra.help.app_name} is powered by Hydra.
+
+      '
+    footer: 'Powered by Hydra (https://hydra.cc)
+
+      Use --hydra-help to view Hydra specific help
+
+      '
+    template: '${hydra.help.header}
+
+      == Configuration groups ==
+
+      Compose your configuration from those groups (group=option)
+
+
+      $APP_CONFIG_GROUPS
+
+
+      == Config ==
+
+      Override anything in the config (foo.bar=value)
+
+
+      $CONFIG
+
+
+      ${hydra.help.footer}
+
+      '
+  hydra_help:
+    template: 'Hydra (${hydra.runtime.version})
+
+      See https://hydra.cc for more info.
+
+
+      == Flags ==
+
+      $FLAGS_HELP
+
+
+      == Configuration groups ==
+
+      Compose your configuration from those groups (For example, append hydra/job_logging=disabled
+      to command line)
+
+
+      $HYDRA_CONFIG_GROUPS
+
+
+      Use ''--cfg hydra'' to Show the Hydra config.
+
+      '
+    hydra_help: ???
+  hydra_logging:
+    version: 1
+    formatters:
+      colorlog:
+        (): colorlog.ColoredFormatter
+        format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s'
+    handlers:
+      console:
+        class: logging.StreamHandler
+        formatter: colorlog
+        stream: ext://sys.stdout
+    root:
+      level: INFO
+      handlers:
+      - console
+    disable_existing_loggers: false
+  job_logging:
+    version: 1
+    formatters:
+      simple:
+        format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
+      colorlog:
+        (): colorlog.ColoredFormatter
+        format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s]
+          - %(message)s'
+        log_colors:
+          DEBUG: purple
+          INFO: green
+          WARNING: yellow
+          ERROR: red
+          CRITICAL: red
+    handlers:
+      console:
+        class: logging.StreamHandler
+        formatter: colorlog
+        stream: ext://sys.stdout
+      file:
+        class: logging.FileHandler
+        formatter: simple
+        filename: ${hydra.job.name}.log
+    root:
+      level: INFO
+      handlers:
+      - console
+      - file
+    disable_existing_loggers: false
+  env: {}
+  mode: RUN
+  searchpath: []
+  callbacks: {}
+  output_subdir: .hydra
+  overrides:
+    hydra:
+    - hydra.run.dir=./runs/text_generation/meta-llama/Llama-3.1-8B-Instruct/2024-10-23-20-30-04
+    - hydra.mode=RUN
+    task:
+    - backend.model=meta-llama/Llama-3.1-8B-Instruct
+    - backend.processor=meta-llama/Llama-3.1-8B-Instruct
+  job:
+    name: cli
+    chdir: true
+    override_dirname: backend.model=meta-llama/Llama-3.1-8B-Instruct,backend.processor=meta-llama/Llama-3.1-8B-Instruct
+    id: ???
+    num: ???
+    config_name: text_generation
+    env_set:
+      OVERRIDE_BENCHMARKS: '1'
+    env_copy: []
+    config:
+      override_dirname:
+        kv_sep: '='
+        item_sep: ','
+        exclude_keys: []
+  runtime:
+    version: 1.3.2
+    version_base: '1.3'
+    cwd: /
+    config_sources:
            +
                - path: hydra.conf
         | 
| 147 | 
            +
                  schema: pkg
         | 
| 148 | 
            +
                  provider: hydra
         | 
| 149 | 
            +
                - path: optimum_benchmark
         | 
| 150 | 
            +
                  schema: pkg
         | 
| 151 | 
            +
                  provider: main
         | 
| 152 | 
            +
                - path: hydra_plugins.hydra_colorlog.conf
         | 
| 153 | 
            +
                  schema: pkg
         | 
| 154 | 
            +
                  provider: hydra-colorlog
         | 
| 155 | 
            +
                - path: /optimum-benchmark/examples/energy_star
         | 
| 156 | 
            +
                  schema: file
         | 
| 157 | 
            +
                  provider: command-line
         | 
| 158 | 
            +
                - path: ''
         | 
| 159 | 
            +
                  schema: structured
         | 
| 160 | 
            +
                  provider: schema
         | 
| 161 | 
            +
                output_dir: /runs/text_generation/meta-llama/Llama-3.1-8B-Instruct/2024-10-23-20-30-04
         | 
| 162 | 
            +
                choices:
         | 
| 163 | 
            +
                  benchmark: energy_star
         | 
| 164 | 
            +
                  launcher: process
         | 
| 165 | 
            +
                  backend: pytorch
         | 
| 166 | 
            +
                  hydra/env: default
         | 
| 167 | 
            +
                  hydra/callbacks: null
         | 
| 168 | 
            +
                  hydra/job_logging: colorlog
         | 
| 169 | 
            +
                  hydra/hydra_logging: colorlog
         | 
| 170 | 
            +
                  hydra/hydra_help: default
         | 
| 171 | 
            +
                  hydra/help: default
         | 
| 172 | 
            +
                  hydra/sweeper: basic
         | 
| 173 | 
            +
                  hydra/launcher: basic
         | 
| 174 | 
            +
                  hydra/output: default
         | 
| 175 | 
            +
              verbose: false
         | 
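
Editor's note: the hydra_logging and job_logging sections above follow Python's standard logging dictConfig schema, which Hydra applies via logging.config.dictConfig; the ${hydra.job.name} interpolation in the file handler is why the job log below is named cli.log (job.name is cli). A minimal stdlib-only sketch of an equivalent configuration, with the third-party colorlog formatter swapped out for the simple one:

    import logging.config

    # Stdlib-only equivalent of the job_logging section above; the colorlog
    # console formatter is replaced by "simple" so no extra package is needed.
    LOGGING = {
        "version": 1,
        "formatters": {
            "simple": {"format": "[%(asctime)s][%(name)s][%(levelname)s] - %(message)s"},
        },
        "handlers": {
            "console": {
                "class": "logging.StreamHandler",
                "formatter": "simple",
                "stream": "ext://sys.stdout",
            },
            "file": {
                "class": "logging.FileHandler",
                "formatter": "simple",
                "filename": "cli.log",  # what ${hydra.job.name}.log resolves to here
            },
        },
        "root": {"level": "INFO", "handlers": ["console", "file"]},
        "disable_existing_loggers": False,
    }

    logging.config.dictConfig(LOGGING)
    logging.getLogger("experiment").info("configured like the Hydra job logger")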
    	
runs/text_generation/meta-llama/Llama-3.1-8B-Instruct/2024-10-23-20-30-04/.hydra/overrides.yaml
ADDED

@@ -0,0 +1,2 @@
+- backend.model=meta-llama/Llama-3.1-8B-Instruct
+- backend.processor=meta-llama/Llama-3.1-8B-Instruct
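
Editor's note: these two overrides, together with job.config_name: text_generation and the command-line config source /optimum-benchmark/examples/energy_star recorded in hydra.yaml, are enough to recompose this run's configuration with Hydra's compose API. A sketch, assuming the example configs are still present at the recorded absolute path:

    from hydra import compose, initialize_config_dir

    # Recompose the run's config from the recorded overrides; the directory
    # and config name come from hydra.yaml above (runtime.config_sources and
    # job.config_name).
    with initialize_config_dir(config_dir="/optimum-benchmark/examples/energy_star", version_base="1.3"):
        cfg = compose(
            config_name="text_generation",
            overrides=[
                "backend.model=meta-llama/Llama-3.1-8B-Instruct",
                "backend.processor=meta-llama/Llama-3.1-8B-Instruct",
            ],
        )

    print(cfg.backend.model)  # meta-llama/Llama-3.1-8B-Instruct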
    	
runs/text_generation/meta-llama/Llama-3.1-8B-Instruct/2024-10-23-20-30-04/cli.log
ADDED

@@ -0,0 +1,17 @@
+[2024-10-23 20:30:07,112][launcher][INFO] - Allocating process launcher
+[2024-10-23 20:30:07,113][process][INFO] - 	+ Setting multiprocessing start method to spawn.
+[2024-10-23 20:30:07,123][process][INFO] - 	+ Launched benchmark in isolated process 421.
+[PROC-0][2024-10-23 20:30:09,639][datasets][INFO] - PyTorch version 2.4.0 available.
+[PROC-0][2024-10-23 20:30:10,573][backend][INFO] - Allocating pytorch backend
+[PROC-0][2024-10-23 20:30:10,573][backend][INFO] - 	+ Setting random seed to 42
+[PROC-0][2024-10-23 20:30:11,062][pytorch][INFO] - 	+ Using AutoModel class AutoModelForCausalLM
+[PROC-0][2024-10-23 20:30:11,062][pytorch][INFO] - 	+ Creating backend temporary directory
+[PROC-0][2024-10-23 20:30:11,062][pytorch][INFO] - 	+ Loading model with random weights
+[PROC-0][2024-10-23 20:30:11,062][pytorch][INFO] - 	+ Creating no weights model
+[PROC-0][2024-10-23 20:30:11,063][pytorch][INFO] - 	+ Creating no weights model directory
+[PROC-0][2024-10-23 20:30:11,063][pytorch][INFO] - 	+ Creating no weights model state dict
+[PROC-0][2024-10-23 20:30:11,084][pytorch][INFO] - 	+ Saving no weights model safetensors
+[PROC-0][2024-10-23 20:30:11,085][pytorch][INFO] - 	+ Saving no weights model pretrained config
+[PROC-0][2024-10-23 20:30:11,085][pytorch][INFO] - 	+ Loading no weights AutoModel
+[PROC-0][2024-10-23 20:30:11,086][pytorch][INFO] - 	+ Loading model directly on device: cuda
+[2024-10-23 20:30:12,070][experiment][ERROR] - Error during experiment
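
Editor's note: the "no weights" lines in this log are the backend skipping the 8B-parameter checkpoint download; it builds a randomly initialized model from the config alone and materializes it straight on CUDA (hence "Loading model directly on device: cuda"). A simplified sketch of that idea with plain transformers, not the backend's actual code path (which also round-trips through a safetensors file); access to the gated meta-llama repo is assumed:

    import torch
    from transformers import AutoConfig, AutoModelForCausalLM

    # Download only the config, then build a randomly initialized model from
    # it; no checkpoint weights are fetched. The torch.device context makes
    # parameters allocate directly on the GPU, as in the log above.
    config = AutoConfig.from_pretrained("meta-llama/Llama-3.1-8B-Instruct")
    with torch.device("cuda"):
        model = AutoModelForCausalLM.from_config(config)
    model.eval()

Note that with torch_dtype left at null in this run's config, parameters default to float32, which matters for the out-of-memory error below.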
    	
runs/text_generation/meta-llama/Llama-3.1-8B-Instruct/2024-10-23-20-30-04/error.log
ADDED

@@ -0,0 +1,50 @@
+Error executing job with overrides: ['backend.model=meta-llama/Llama-3.1-8B-Instruct', 'backend.processor=meta-llama/Llama-3.1-8B-Instruct']
+Traceback (most recent call last):
+  File "/optimum-benchmark/optimum_benchmark/cli.py", line 65, in benchmark_cli
+    benchmark_report: BenchmarkReport = launch(experiment_config=experiment_config)
+  File "/optimum-benchmark/optimum_benchmark/experiment.py", line 102, in launch
+    raise error
+  File "/optimum-benchmark/optimum_benchmark/experiment.py", line 90, in launch
+    report = launcher.launch(run, experiment_config.benchmark, experiment_config.backend)
+  File "/optimum-benchmark/optimum_benchmark/launchers/process/launcher.py", line 47, in launch
+    while not process_context.join():
+  File "/opt/conda/lib/python3.9/site-packages/torch/multiprocessing/spawn.py", line 189, in join
+    raise ProcessRaisedException(msg, error_index, failed_process.pid)
+torch.multiprocessing.spawn.ProcessRaisedException:
+
+-- Process 0 terminated with the following error:
+Traceback (most recent call last):
+  File "/opt/conda/lib/python3.9/site-packages/torch/multiprocessing/spawn.py", line 76, in _wrap
+    fn(i, *args)
+  File "/optimum-benchmark/optimum_benchmark/launchers/process/launcher.py", line 63, in entrypoint
+    worker_output = worker(*worker_args)
+  File "/optimum-benchmark/optimum_benchmark/experiment.py", line 55, in run
+    backend: Backend = backend_factory(backend_config)
+  File "/optimum-benchmark/optimum_benchmark/backends/pytorch/backend.py", line 81, in __init__
+    self.load_model_with_no_weights()
+  File "/optimum-benchmark/optimum_benchmark/backends/pytorch/backend.py", line 246, in load_model_with_no_weights
+    self.load_model_from_pretrained()
+  File "/optimum-benchmark/optimum_benchmark/backends/pytorch/backend.py", line 204, in load_model_from_pretrained
+    self.pretrained_model = self.automodel_class.from_pretrained(
+  File "/opt/conda/lib/python3.9/site-packages/transformers/models/auto/auto_factory.py", line 564, in from_pretrained
+    return model_class.from_pretrained(
+  File "/opt/conda/lib/python3.9/site-packages/transformers/modeling_utils.py", line 3810, in from_pretrained
+    model = cls(config, *model_args, **model_kwargs)
+  File "/opt/conda/lib/python3.9/site-packages/transformers/models/llama/modeling_llama.py", line 1116, in __init__
+    self.model = LlamaModel(config)
+  File "/opt/conda/lib/python3.9/site-packages/transformers/models/llama/modeling_llama.py", line 902, in __init__
+    [LlamaDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
+  File "/opt/conda/lib/python3.9/site-packages/transformers/models/llama/modeling_llama.py", line 902, in <listcomp>
+    [LlamaDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
+  File "/opt/conda/lib/python3.9/site-packages/transformers/models/llama/modeling_llama.py", line 691, in __init__
+    self.mlp = LlamaMLP(config)
+  File "/opt/conda/lib/python3.9/site-packages/transformers/models/llama/modeling_llama.py", line 287, in __init__
+    self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias)
+  File "/opt/conda/lib/python3.9/site-packages/torch/nn/modules/linear.py", line 99, in __init__
+    self.weight = Parameter(torch.empty((out_features, in_features), **factory_kwargs))
+  File "/opt/conda/lib/python3.9/site-packages/torch/utils/_device.py", line 79, in __torch_function__
+    return func(*args, **kwargs)
+torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 224.00 MiB. GPU 0 has a total capacity of 22.19 GiB of which 69.50 MiB is free. Process 28976 has 22.12 GiB memory in use. Of the allocated memory 21.83 GiB is allocated by PyTorch, and 1.24 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation.  See documentation for Memory Management  (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
+
+
+Set the environment variable HYDRA_FULL_ERROR=1 for a complete stack trace.
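
Editor's note: two details stand out in this trace. First, the allocation fails while building a LlamaMLP projection because GPU 0 is already nearly full; the 22.12 GiB held by process 28976 suggests an earlier run (note the 20-29-54 attempt in this same upload) had not released its memory. Second, with backend.torch_dtype left at null the model is materialized in float32, and 8B parameters at 4 bytes each is roughly 32 GB, more than an A10G offers even when empty, so backend.torch_dtype=float16 (or bfloat16) would be needed regardless. A small sketch for checking headroom first and opting into the allocator setting that the error message itself suggests:

    import os

    # Suggested by the error message: let the caching allocator grow segments
    # instead of fragmenting. Must be set before CUDA is initialized.
    os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"

    import torch

    # Check headroom on GPU 0 before materializing the model: an 8B-parameter
    # model needs ~32 GB in float32 but ~16 GB in float16.
    free, total = torch.cuda.mem_get_info(0)
    print(f"GPU 0: {free / 2**30:.2f} GiB free of {total / 2**30:.2f} GiB")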
    	
runs/text_generation/meta-llama/Llama-3.1-8B-Instruct/2024-10-23-20-30-04/experiment_config.json
ADDED

@@ -0,0 +1,110 @@
+{
+    "experiment_name": "text_generation",
+    "backend": {
+        "name": "pytorch",
+        "version": "2.4.0",
+        "_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
+        "task": "text-generation",
+        "model": "meta-llama/Llama-3.1-8B-Instruct",
+        "processor": "meta-llama/Llama-3.1-8B-Instruct",
+        "library": "transformers",
+        "device": "cuda",
+        "device_ids": "0",
+        "seed": 42,
+        "inter_op_num_threads": null,
+        "intra_op_num_threads": null,
+        "hub_kwargs": {
+            "revision": "main",
+            "force_download": false,
+            "local_files_only": false,
+            "trust_remote_code": true
+        },
+        "no_weights": true,
+        "device_map": null,
+        "torch_dtype": null,
+        "amp_autocast": false,
+        "amp_dtype": null,
+        "eval_mode": true,
+        "to_bettertransformer": false,
+        "low_cpu_mem_usage": null,
+        "attn_implementation": null,
+        "cache_implementation": null,
+        "torch_compile": false,
+        "torch_compile_config": {},
+        "quantization_scheme": null,
+        "quantization_config": {},
+        "deepspeed_inference": false,
+        "deepspeed_inference_config": {},
+        "peft_type": null,
+        "peft_config": {}
+    },
+    "launcher": {
+        "name": "process",
+        "_target_": "optimum_benchmark.launchers.process.launcher.ProcessLauncher",
+        "device_isolation": false,
+        "device_isolation_action": "warn",
+        "start_method": "spawn"
+    },
+    "benchmark": {
+        "name": "energy_star",
+        "_target_": "optimum_benchmark.benchmarks.energy_star.benchmark.EnergyStarBenchmark",
+        "dataset_name": "EnergyStarAI/text_generation",
+        "dataset_config": "",
+        "dataset_split": "train",
+        "num_samples": 1000,
+        "input_shapes": {
+            "batch_size": 1
+        },
+        "text_column_name": "text",
+        "truncation": true,
+        "max_length": -1,
+        "dataset_prefix1": "",
+        "dataset_prefix2": "",
+        "t5_task": "",
+        "image_column_name": "image",
+        "resize": false,
+        "question_column_name": "question",
+        "context_column_name": "context",
+        "sentence1_column_name": "sentence1",
+        "sentence2_column_name": "sentence2",
+        "audio_column_name": "audio",
+        "iterations": 10,
+        "warmup_runs": 10,
+        "energy": true,
+        "forward_kwargs": {},
+        "generate_kwargs": {
+            "max_new_tokens": 10,
+            "min_new_tokens": 10
+        },
+        "call_kwargs": {}
+    },
+    "environment": {
+        "cpu": " AMD EPYC 7R32",
+        "cpu_count": 48,
+        "cpu_ram_mb": 200472.73984,
+        "system": "Linux",
+        "machine": "x86_64",
+        "platform": "Linux-5.10.192-183.736.amzn2.x86_64-x86_64-with-glibc2.35",
+        "processor": "x86_64",
+        "python_version": "3.9.20",
+        "gpu": [
+            "NVIDIA A10G"
+        ],
+        "gpu_count": 1,
+        "gpu_vram_mb": 24146608128,
+        "optimum_benchmark_version": "0.2.0",
+        "optimum_benchmark_commit": null,
+        "transformers_version": "4.44.0",
+        "transformers_commit": null,
+        "accelerate_version": "0.33.0",
+        "accelerate_commit": null,
+        "diffusers_version": "0.30.0",
+        "diffusers_commit": null,
+        "optimum_version": null,
+        "optimum_commit": null,
+        "timm_version": null,
+        "timm_commit": null,
+        "peft_version": null,
+        "peft_commit": null
+    }
+}
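
Editor's note: because every run directory pairs an experiment_config.json with its logs and error trace, post-mortems like the one above are easy to script, for example confirming that torch_dtype was indeed left unset for this failed run. A minimal sketch, with the path taken from this upload's layout:

    import json
    from pathlib import Path

    run_dir = Path("runs/text_generation/meta-llama/Llama-3.1-8B-Instruct/2024-10-23-20-30-04")
    config = json.loads((run_dir / "experiment_config.json").read_text())

    print(config["backend"]["model"])        # meta-llama/Llama-3.1-8B-Instruct
    print(config["backend"]["torch_dtype"])  # None -> model was built in float32
    print(config["environment"]["gpu"])      # ['NVIDIA A10G']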

