import json
import re

from datasets import load_dataset

import verifiers as vf


def load_environment(
    num_train_examples=7000,
    num_eval_examples=1000,
    **kwargs
):
    """
    Environment for verifying complex JSON output from models.

    The task requires models to:
    1. Parse multi-question prompts
    2. Generate valid JSON responses
    3. Match the expected structure with correct keys and values

    Rewards (no penalties, only positive rewards):
    - Formatting (valid JSON dict): 0.33 if pass, 0 if fail
    - All keys match: 0.33 if pass, 0 if fail
    - Answer values match: 0.33 if pass, 0 if fail

    Total max reward: ~1.0 (3 x 0.33 = 0.99)
    """
    # Load dataset from HuggingFace
    dataset = load_dataset("Delta-Vector/Tauri-Complex-JSON-Formatting", split="train")

    # Map to the expected format - keep verification_info as a string to avoid schema issues
    def format_example(example):
        return {
            "question": example["prompt"],
            "info": {"verification_info": example["verification_info"]},  # keep as a dict holding the raw string
        }

    dataset = dataset.map(format_example, remove_columns=dataset.column_names)
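    # Illustrative only: after the map above, each row looks roughly like
    #   {"question": "<multi-question prompt>",
    #    "info": {"verification_info": "{\"ground_truth\": {...}}"}}
    # The exact ground_truth keys and values come from the dataset row; the
    # reward functions below only assume that verification_info is a JSON
    # string with a top-level "ground_truth" object.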

    # Split into train and eval
    train_dataset = dataset.select(range(num_train_examples))
    eval_dataset = dataset.select(range(num_train_examples, num_train_examples + num_eval_examples))

    # Custom extract function to parse JSON from code blocks or raw text
    def extract_json_from_completion(completion):
        """Extract JSON from the completion, handling fenced code blocks."""
        if not completion:
            return ""

        # Get the content of the last message
        if isinstance(completion, list) and len(completion) > 0:
            content = completion[-1].get("content", "")
        else:
            content = str(completion)

        # Try to extract from code blocks first (```json ... ``` or ``` ... ```)
        code_block_pattern = r"```(?:json)?\s*\n(.*?)\n```"
        matches = re.findall(code_block_pattern, content, re.DOTALL)
        if matches:
            return matches[-1].strip()  # return the last code block

        # Otherwise return the content as-is
        return content.strip()
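    # Illustrative behavior of the extractor above (toy values, not from the
    # dataset):
    #   extract_json_from_completion(
    #       [{"role": "assistant", "content": "Sure:\n```json\n{\"a\": 1}\n```"}]
    #   )
    #   -> '{"a": 1}'
    # A bare JSON reply with no code fence is returned stripped but otherwise
    # unchanged.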

    # Use a simple Parser with the custom extract function
    parser = vf.Parser(extract_fn=extract_json_from_completion)

    def format_reward(completion, **kwargs) -> float:
        """
        Reward for valid JSON formatting.
        Returns 0.33 for a valid JSON dict, 0 for invalid.
        """
        try:
            response = parser.parse_answer(completion) or ""
            response = response.strip()

            # Check that the response is not empty
            if not response:
                return 0.0

            # Try to parse as JSON
            parsed = json.loads(response)

            # Must be a dict (since the ground truth is always a dict)
            if not isinstance(parsed, dict):
                return 0.0

            return 0.33
        except (json.JSONDecodeError, ValueError, TypeError):
            return 0.0

    def keys_match_reward(completion, info, **kwargs) -> float:
        """
        Reward for matching keys in the JSON structure.
        Returns 0.33 if all keys (including nested ones) match exactly, 0 otherwise.
        """
        try:
            response = parser.parse_answer(completion) or ""
            response = response.strip()
            parsed_response = json.loads(response)

            # Parse the ground truth from info
            verification_info = json.loads(info["verification_info"])
            ground_truth = verification_info["ground_truth"]

            # The response must be a dict
            if not isinstance(parsed_response, dict):
                return 0.0

            # Collect all keys from a dict recursively, as dotted paths
            def get_all_keys(d, prefix=""):
                keys = set()
                if isinstance(d, dict):
                    for k, v in d.items():
                        full_key = f"{prefix}.{k}" if prefix else k
                        keys.add(full_key)
                        keys.update(get_all_keys(v, full_key))
                return keys

            expected_keys = get_all_keys(ground_truth)
            actual_keys = get_all_keys(parsed_response)

            # The key sets must match exactly
            if expected_keys == actual_keys:
                return 0.33
            return 0.0
        except (json.JSONDecodeError, ValueError, AttributeError, TypeError):
            return 0.0

    def values_match_reward(completion, info, **kwargs) -> float:
        """
        Reward for matching values in the JSON structure.
        Returns 0.33 if the parsed response deep-equals the ground truth
        (same types, keys, and values, including nested ones), 0 otherwise.
        """
        try:
            response = parser.parse_answer(completion) or ""
            response = response.strip()
            parsed_response = json.loads(response)

            # Parse the ground truth from info
            verification_info = json.loads(info["verification_info"])
            ground_truth = verification_info["ground_truth"]

            # Deep comparison of values
            def deep_compare(a, b):
                if type(a) != type(b):
                    return False
                if isinstance(a, dict):
                    if set(a.keys()) != set(b.keys()):
                        return False
                    return all(deep_compare(a[k], b[k]) for k in a.keys())
                elif isinstance(a, list):
                    if len(a) != len(b):
                        return False
                    return all(deep_compare(a[i], b[i]) for i in range(len(a)))
                else:
                    return a == b

            if deep_compare(parsed_response, ground_truth):
                return 0.33
            return 0.0
        except (json.JSONDecodeError, ValueError, AttributeError, TypeError):
            return 0.0
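    # Worked example (hypothetical values): if ground_truth is
    #   {"q1": "yes", "q2": 3}
    # and the model returns '{"q1": "yes", "q2": 4}', then
    #   format_reward       -> 0.33  (valid JSON dict)
    #   keys_match_reward   -> 0.33  (key sets {"q1", "q2"} match exactly)
    #   values_match_reward -> 0.0   (q2 differs)
    # for a total reward of 0.66 under the equal weights set below.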

    # Create the rubric with all three reward functions
    rubric = vf.Rubric(
        parser=parser,
        funcs=[
            format_reward,
            keys_match_reward,
            values_match_reward,
        ],
        weights=[1.0, 1.0, 1.0],  # equal weight for all three criteria
    )

    # Return a SingleTurnEnv since this is a one-shot task.
    # No system prompt - let the dataset prompt speak for itself.
    vf_env = vf.SingleTurnEnv(
        dataset=train_dataset,
        eval_dataset=eval_dataset,
        parser=parser,
        rubric=rubric,
    )
    return vf_env
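

if __name__ == "__main__":
    # Minimal smoke test (an illustrative sketch, not part of the environment
    # contract): build the environment on a small slice to confirm that the
    # dataset download, the train/eval split, and the rubric wiring succeed.
    env = load_environment(num_train_examples=10, num_eval_examples=5)
    print(f"Loaded environment: {type(env).__name__}")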