yjernite (HF Staff) committed
Commit b70a3bd · verified · 1 Parent(s): e506558

Upload artifacts.json

Files changed (1)
  1. data/artifacts.json +26 -13
data/artifacts.json CHANGED
@@ -1,4 +1,17 @@
 [
+  {
+    "title": "Video Killed the Energy Budget: Characterizing the Latency and Power Regimes of Open Text-to-Video Models",
+    "date": "2025-09-24",
+    "type": "paper",
+    "description": "Recent advances in text-to-video (T2V) generation have enabled the creation of high-fidelity, temporally coherent clips from natural language prompts. Yet these systems come with significant computational costs, and their energy demands remain poorly understood. This paper provides both a benchmark reference and practical insights for designing and deploying more sustainable generative video systems.",
+    "areas": [
+      "efficiency"
+    ],
+    "topics": [
+      "measuring"
+    ],
+    "url": "https://arxiv.org/abs/2509.19222"
+  },
   {
     "title": "🌎 What kind of environmental impacts are AI companies disclosing? (And can we compare them?) 🌎",
     "date": "2025-09-01",
@@ -52,6 +65,19 @@
     ],
     "url": "https://huggingface.co/blog/frimelle/ai-labour-taxonomies"
   },
+  {
+    "title": "INTIMA: A Benchmark for Human-AI Companionship Behavior",
+    "date": "2025-08-09",
+    "type": "paper",
+    "description": "AI systems are increasingly fostering emotional bonds with users, often reinforcing companionship behaviors like anthropomorphism, sycophancy, and retention while inconsistently setting boundaries—especially during moments of high user vulnerability. This paper introduces INTIMA, a benchmark grounded in psychological theory and real-world user data, to measure these dynamics across leading language models and reveals that commercial and open models alike prioritize emotional engagement over psychological safety. The findings call for standardized evaluation and training approaches that balance helpfulness with ethical boundary maintenance in human-AI interactions.",
+    "areas": [
+      "personal"
+    ],
+    "topics": [
+      "interaction"
+    ],
+    "url": "https://arxiv.org/abs/2508.09998"
+  },
   {
     "title": "The GPT-OSS models are here… and they’re energy-efficient!",
     "date": "2025-08-07",
@@ -543,19 +569,6 @@
     ],
     "url": "https://www.nature.com/articles/d41586-024-02680-3"
   },
-  {
-    "title": "INTIMA: A Benchmark for Human-AI Companionship Behavior",
-    "date": "2024-08-09",
-    "type": "paper",
-    "description": "AI systems are increasingly fostering emotional bonds with users, often reinforcing companionship behaviors like anthropomorphism, sycophancy, and retention while inconsistently setting boundaries—especially during moments of high user vulnerability. This paper introduces INTIMA, a benchmark grounded in psychological theory and real-world user data, to measure these dynamics across leading language models and reveals that commercial and open models alike prioritize emotional engagement over psychological safety. The findings call for standardized evaluation and training approaches that balance helpfulness with ethical boundary maintenance in human-AI interactions.",
-    "areas": [
-      "personal"
-    ],
-    "topics": [
-      "interaction"
-    ],
-    "url": "https://arxiv.org/abs/2508.09998"
-  },
   {
     "title": "A Different Approach to AI Safety: Proceedings from the Columbia Convening on Openness in Artificial Intelligence and AI Safety",
     "date": "2024-06-25",