---
license: apache-2.0
library_name: transformers
tags:
  - mergekit
  - merge
base_model:
  - CultriX/Qwen2.5-14B-MergeStock
  - CultriX/SeQwence-14Bv1
  - allknowingroger/QwenStock1-14B
  - CultriX/Qwen2.5-14B-Wernicke
  - allknowingroger/QwenStock2-14B
  - allknowingroger/Qwenslerp2-14B
  - CultriX/Qwen2.5-14B-MegaMerge-pt2
  - CultriX/Qwestion-14B
model-index:
  - name: QwenStock3-14B
    results:
      - task:
          type: text-generation
          name: Text Generation
        dataset:
          name: IFEval (0-Shot)
          type: HuggingFaceH4/ifeval
          args:
            num_few_shot: 0
        metrics:
          - type: inst_level_strict_acc and prompt_level_strict_acc
            value: 56.15
            name: strict accuracy
        source:
          url: >-
            https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=allknowingroger/QwenStock3-14B
          name: Open LLM Leaderboard
      - task:
          type: text-generation
          name: Text Generation
        dataset:
          name: BBH (3-Shot)
          type: BBH
          args:
            num_few_shot: 3
        metrics:
          - type: acc_norm
            value: 50.58
            name: normalized accuracy
        source:
          url: >-
            https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=allknowingroger/QwenStock3-14B
          name: Open LLM Leaderboard
      - task:
          type: text-generation
          name: Text Generation
        dataset:
          name: MATH Lvl 5 (4-Shot)
          type: hendrycks/competition_math
          args:
            num_few_shot: 4
        metrics:
          - type: exact_match
            value: 29.68
            name: exact match
        source:
          url: >-
            https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=allknowingroger/QwenStock3-14B
          name: Open LLM Leaderboard
      - task:
          type: text-generation
          name: Text Generation
        dataset:
          name: GPQA (0-shot)
          type: Idavidrein/gpqa
          args:
            num_few_shot: 0
        metrics:
          - type: acc_norm
            value: 17.11
            name: acc_norm
        source:
          url: >-
            https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=allknowingroger/QwenStock3-14B
          name: Open LLM Leaderboard
      - task:
          type: text-generation
          name: Text Generation
        dataset:
          name: MuSR (0-shot)
          type: TAUR-Lab/MuSR
          args:
            num_few_shot: 0
        metrics:
          - type: acc_norm
            value: 19.11
            name: acc_norm
        source:
          url: >-
            https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=allknowingroger/QwenStock3-14B
          name: Open LLM Leaderboard
      - task:
          type: text-generation
          name: Text Generation
        dataset:
          name: MMLU-PRO (5-shot)
          type: TIGER-Lab/MMLU-Pro
          config: main
          split: test
          args:
            num_few_shot: 5
        metrics:
          - type: acc
            value: 49.2
            name: accuracy
        source:
          url: >-
            https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=allknowingroger/QwenStock3-14B
          name: Open LLM Leaderboard
---

# QwenStock3-14B

This is a merge of pre-trained language models created using [mergekit](https://github.com/cg123/mergekit).

## Merge Details

### Merge Method

This model was merged using the Model Stock merge method using [CultriX/SeQwence-14Bv1](https://huggingface.co/CultriX/SeQwence-14Bv1) as a base.

### Models Merged

The following models were included in the merge:
- CultriX/Qwen2.5-14B-MergeStock
- allknowingroger/QwenStock1-14B
- CultriX/Qwen2.5-14B-Wernicke
- allknowingroger/QwenStock2-14B
- allknowingroger/Qwenslerp2-14B
- CultriX/Qwen2.5-14B-MegaMerge-pt2
- CultriX/Qwestion-14B
### Configuration

The following YAML configuration was used to produce this model:

```yaml
models:
  - model: CultriX/Qwen2.5-14B-MergeStock
  - model: CultriX/SeQwence-14Bv1
  - model: allknowingroger/Qwenslerp2-14B
  - model: allknowingroger/QwenStock1-14B
  - model: allknowingroger/QwenStock2-14B
  - model: CultriX/Qwestion-14B
  - model: CultriX/Qwen2.5-14B-Wernicke
  - model: CultriX/Qwen2.5-14B-MegaMerge-pt2
base_model: CultriX/SeQwence-14Bv1
merge_method: model_stock
parameters:
  normalize: true
dtype: bfloat16
tokenizer_source: CultriX/SeQwence-14Bv1
```
## Open LLM Leaderboard Evaluation Results

Detailed results can be found [here](https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=allknowingroger/QwenStock3-14B).
| Metric              | Value |
|---------------------|-------|
| Avg.                | 36.97 |
| IFEval (0-Shot)     | 56.15 |
| BBH (3-Shot)        | 50.58 |
| MATH Lvl 5 (4-Shot) | 29.68 |
| GPQA (0-shot)       | 17.11 |
| MuSR (0-shot)       | 19.11 |
| MMLU-PRO (5-shot)   | 49.20 |