Nora Petrova committed

Commit 6688953 · Parent: db3726a

Initial dataset upload: HUMAINE evaluation data with CSV and Parquet formats
Files changed:
- .gitattributes (+4 -0)
- README.md (+193 -3)
- conversations_metadata_dataset.csv (+3 -0)
- conversations_metadata_dataset.parquet (+3 -0)
- feedback_dataset.csv (+3 -0)
- feedback_dataset.parquet (+3 -0)
.gitattributes CHANGED

@@ -57,3 +57,7 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 # Video files - compressed
 *.mp4 filter=lfs diff=lfs merge=lfs -text
 *.webm filter=lfs diff=lfs merge=lfs -text
+feedback_dataset.csv filter=lfs diff=lfs merge=lfs -text
+conversations_metadata_dataset.parquet filter=lfs diff=lfs merge=lfs -text
+feedback_dataset.parquet filter=lfs diff=lfs merge=lfs -text
+conversations_metadata_dataset.csv filter=lfs diff=lfs merge=lfs -text
README.md CHANGED

@@ -1,3 +1,193 @@
Removed (the previous placeholder README):
- ---
- license: mit
- ---

Added (the full dataset card):

---
license: mit
task_categories:
- text-classification
- question-answering
language:
- en
tags:
- human-ai-interaction
- model-evaluation
- preference-learning
- conversational-ai
pretty_name: HUMAINE Human-AI Interaction Evaluation Dataset
size_categories:
- 100K<n<1M
dataset_info:
  features:
    - name: conversations_metadata
      struct:
        - name: conversation_id
          dtype: int64
        - name: model_name
          dtype: string
        - name: task_type
          dtype: string
        - name: domain
          dtype: string
        - name: task_complexity_score
          dtype: int64
        - name: goal_achievement_score
          dtype: int64
        - name: user_engagement_score
          dtype: int64
        - name: total_messages
          dtype: int64
    - name: feedback_comparisons
      struct:
        - name: conversation_id
          dtype: int64
        - name: model_a
          dtype: string
        - name: model_b
          dtype: string
        - name: metric
          dtype: string
        - name: choice
          dtype: string
        - name: age
          dtype: int64
        - name: ethnic_group
          dtype: string
        - name: political_affilation
          dtype: string
        - name: country_of_residence
          dtype: string
---

# HUMAINE: Human-AI Interaction Evaluation Dataset

## Dataset Description

### Dataset Summary

The HUMAINE dataset contains human evaluations of AI model interactions across diverse demographic groups and conversation contexts. This dataset powers the [HUMAINE Leaderboard](https://huggingface.co/spaces/ProlificAI/humaine-leaderboard), providing insights into how different AI models perform across various user populations and use cases.

The dataset consists of two main components:
- **Conversations Metadata**: 40,332 conversations with task complexity, achievement, and engagement scores
- **Feedback Comparisons**: 105,220 pairwise model comparisons across multiple evaluation metrics

**Note**: There may be a slight discrepancy between the numbers in this dataset and the leaderboard app due to changes in consent related to data release and the post-processing steps involved in preparing this dataset.

### Supported Tasks

- Model performance evaluation
- Demographic bias analysis
- Preference learning
- Human-AI interaction research
- Conversational AI benchmarking

## Dataset Structure

### Data Files

The dataset contains two data files, each available in both CSV and Parquet format:

1. **`conversations_metadata_dataset.csv`** (40,332 rows)
   - Metadata about individual conversations between users and AI models
   - Includes task types, domains, and performance scores

2. **`feedback_dataset.csv`** (105,220 rows)
   - Pairwise comparisons between different AI models
   - Includes demographic information and preference choices

### Data Fields

#### Conversations Metadata
- `conversation_id`: Unique identifier for the conversation
- `model_name`: Name of the AI model used
- `task_type`: Type of task (information_seeking, technical_assistance, etc.)
- `domain`: Domain of the conversation (health_medical, technology, travel, etc.)
- `task_complexity_score`: Complexity rating (1-5)
- `goal_achievement_score`: How well the goal was achieved (1-5)
- `user_engagement_score`: User engagement level (1-5)
- `total_messages`: Total number of messages in the conversation

#### Feedback Comparisons
- `conversation_id`: Unique identifier linking to conversation metadata
- `model_a`: First model in the comparison
- `model_b`: Second model in the comparison
- `metric`: Evaluation metric (overall_winner, trust_ethics_and_safety, core_task_performance_and_reasoning, interaction_fluidity_and_adaptiveness)
- `choice`: User's choice (A, B, or tie)
- `age`: Age of the evaluator
- `ethnic_group`: Ethnic group of the evaluator
- `political_affilation`: Political affiliation of the evaluator
- `country_of_residence`: Country of residence of the evaluator
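
For illustration, the sketch below computes per-model win rates for one metric from the feedback comparisons. It is a minimal example, assuming a local copy of `feedback_dataset.csv`; the column names and choice values are taken from the field list above.

```python
import pandas as pd

# Assumes feedback_dataset.csv has been downloaded locally.
feedback = pd.read_csv("feedback_dataset.csv")

# Keep one metric, e.g. the overall winner judgments.
overall = feedback[feedback["metric"] == "overall_winner"]

# Count wins: model_a wins when choice == "A", model_b wins when choice == "B".
wins = pd.concat([
    overall.loc[overall["choice"] == "A", "model_a"],
    overall.loc[overall["choice"] == "B", "model_b"],
]).value_counts()

# Count how often each model appeared in a comparison at all.
appearances = pd.concat([overall["model_a"], overall["model_b"]]).value_counts()

# Win rate per model (ties count as appearances without wins).
win_rate = (wins.reindex(appearances.index, fill_value=0) / appearances)
print(win_rate.sort_values(ascending=False))
```

The same pattern works for the other metrics by changing the `metric` filter.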

## Usage

This dataset contains two files that can be joined on the `conversation_id` field:
- `conversations_metadata_dataset.csv`: Metadata about each conversation
- `feedback_dataset.csv`: Pairwise model comparisons with demographic information

Both files are included in this single dataset repository and can be accessed using Hugging Face's dataset loading utilities, as in the sketch below.
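
A minimal loading-and-joining sketch, assuming the two CSV files have been downloaded locally (for example by cloning the repository or via `huggingface_hub.snapshot_download`):

```python
from datasets import load_dataset

# Load both files with the generic CSV builder; each becomes its own split.
data = load_dataset(
    "csv",
    data_files={
        "conversations": "conversations_metadata_dataset.csv",
        "feedback": "feedback_dataset.csv",
    },
)

# Join the pairwise comparisons with the conversation metadata.
conversations = data["conversations"].to_pandas()
feedback = data["feedback"].to_pandas()
merged = feedback.merge(conversations, on="conversation_id", how="left")
print(merged.columns.tolist())
```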
            +
            ## Dataset Creation
         | 
| 126 | 
            +
             | 
| 127 | 
            +
            ### Curation Rationale
         | 
| 128 | 
            +
             | 
| 129 | 
            +
            This dataset was created to address the lack of diverse, demographically-aware evaluation data for AI models. It captures real-world human preferences and interactions across different population groups, enabling more inclusive AI development.
         | 
| 130 | 
            +
             | 
| 131 | 
            +
            ### Source Data
         | 
| 132 | 
            +
             | 
| 133 | 
            +
            Data was collected through structured human evaluation tasks where participants:
         | 
| 134 | 
            +
            1. Engaged in conversations with various AI models
         | 
| 135 | 
            +
            2. Provided pairwise comparisons between model outputs
         | 
| 136 | 
            +
            3. Rated conversations on multiple quality dimensions (metrics)
         | 
| 137 | 
            +
             | 
| 138 | 
            +
            ### Annotations
         | 
| 139 | 
            +
             | 
| 140 | 
            +
            All annotations were provided by human evaluators through the Prolific platform, ensuring demographic diversity and high-quality feedback.
         | 
| 141 | 
            +
             | 
| 142 | 
            +
            ### Personal and Sensitive Information
         | 
| 143 | 
            +
             | 
| 144 | 
            +
            The dataset contains aggregated demographic information (age groups, ethnic groups, political affiliations, countries) but no personally identifiable information. All data has been anonymized and aggregated to protect participant privacy.
         | 
| 145 | 
            +
             | 
| 146 | 
            +
            ## Considerations for Using the Data
         | 
| 147 | 
            +
             | 
| 148 | 
            +
            ### Social Impact
         | 
| 149 | 
            +
             | 
| 150 | 
            +
            This dataset aims to promote more inclusive AI development by highlighting performance differences across demographic groups. It should be used to improve AI systems' fairness and effectiveness for all users.
         | 
| 151 | 
            +
             | 
| 152 | 
            +
            ### Discussion of Biases
         | 
| 153 | 
            +
             | 
| 154 | 
            +
            While efforts were made to ensure demographic diversity, the dataset may still contain biases related to:
         | 
| 155 | 
            +
            - Geographic representation (primarily US and UK participants)
         | 
| 156 | 
            +
            - Self-selection bias in participant recruitment
         | 
| 157 | 
            +
            - Cultural and linguistic factors affecting evaluation criteria
         | 
| 158 | 
            +
             | 
| 159 | 
            +
            ### Other Known Limitations
         | 
| 160 | 
            +
             | 
| 161 | 
            +
            - Limited to English-language interactions
         | 
| 162 | 
            +
            - Demographic categories are self-reported
         | 
| 163 | 
            +
            - Temporal bias (models evaluated at specific points in time)
         | 
| 164 | 
            +
             | 
| 165 | 
            +
            ## Additional Information
         | 
| 166 | 
            +
             | 
| 167 | 
            +
            ### Dataset Curators
         | 
| 168 | 
            +
             | 
| 169 | 
            +
            This dataset was curated by the Prolific AI team as part of the HUMAINE (Human-AI Interaction Evaluation) project.
         | 
| 170 | 
            +
             | 
| 171 | 
            +
            ### Licensing Information
         | 
| 172 | 
            +
             | 
| 173 | 
            +
            This dataset is released under the MIT License.
         | 
| 174 | 
            +
             | 
| 175 | 
            +
            ### Citation Information
         | 
| 176 | 
            +
             | 
| 177 | 
            +
            ```bibtex
         | 
| 178 | 
            +
            @dataset{humaine2025,
         | 
| 179 | 
            +
              title={HUMAINE: Human-AI Interaction Evaluation Dataset},
         | 
| 180 | 
            +
              author={Prolific AI Team},
         | 
| 181 | 
            +
              year={2025},
         | 
| 182 | 
            +
              publisher={Hugging Face},
         | 
| 183 | 
            +
              url={https://huggingface.co/datasets/ProlificAI/humaine-evaluation-dataset}
         | 
| 184 | 
            +
            }
         | 
| 185 | 
            +
            ```
         | 
| 186 | 
            +
             | 
| 187 | 
            +
            ### Contributions
         | 
| 188 | 
            +
             | 
| 189 | 
            +
            Thanks to all the human evaluators who contributed their feedback to this project!
         | 
| 190 | 
            +
             | 
| 191 | 
            +
            ## Contact
         | 
| 192 | 
            +
             | 
| 193 | 
            +
            For questions or feedback about this dataset, please visit the [HUMAINE Leaderboard](https://huggingface.co/spaces/ProlificAI/humaine-leaderboard) or contact the Prolific AI team.
         | 
    	
conversations_metadata_dataset.csv ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b1b9936da135f061ea381ced9b562034edd9383f2c3e0b43228b6acee2e43d69
+size 2645059
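
Each of the data files below is committed as a Git LFS pointer like the one above rather than as raw data; the actual contents are fetched by `git lfs pull` or the Hub's download tooling. A small illustrative sketch of reading the three-line pointer format (the helper name is hypothetical):

```python
def parse_lfs_pointer(path: str) -> dict:
    """Parse a Git LFS pointer file into its version/oid/size fields."""
    fields = {}
    with open(path, encoding="utf-8") as fh:
        for line in fh:
            key, _, value = line.strip().partition(" ")
            if key:
                fields[key] = value
    return fields

# For the pointer above, this yields:
# {'version': 'https://git-lfs.github.com/spec/v1',
#  'oid': 'sha256:b1b9936da135f061ea381ced9b562034edd9383f2c3e0b43228b6acee2e43d69',
#  'size': '2645059'}
```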
    	
conversations_metadata_dataset.parquet ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:957c0e3c11b768d158e66bb053ad5096608dc1f80fc36e69e36b035194dc13c8
+size 302115
feedback_dataset.csv ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2b9dec54e5620c35aed0534cd1d3bed3620a556a74b182729a743d2ec3a5be1f
+size 11973022
feedback_dataset.parquet ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bbca035715e13e879af5e0881df43d5e9407867860ebcfce6e3a165b8631acbf
+size 688645