Update index.html

index.html +474 -19

@@ -1,19 +1,474 @@
-<!
-<html>
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+<!DOCTYPE html>
+<html lang="en">
+<head>
+    <meta charset="UTF-8">
+    <meta name="viewport" content="width=device-width, initial-scale=1.0">
+    <title>Hugging Face Hot Papers, February 2025</title>
+    <style>
+        body {
+            font-family: 'Inter', -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, Oxygen, Ubuntu, Cantarell, 'Open Sans', 'Helvetica Neue', sans-serif;
+            background-color: #f5f8fa;
+            color: #1a1a1a;
+            line-height: 1.5;
+            padding: 20px;
+            max-width: 1200px;
+            margin: 0 auto;
+        }
+
+        h1 {
+            color: #333;
+            text-align: center;
+            font-size: 28px;
+            margin-bottom: 30px;
+        }
+
+        .cards-container {
+            display: grid;
+            grid-template-columns: repeat(auto-fill, minmax(350px, 1fr));
+            gap: 25px;
+        }
+
+        .card {
+            background: white;
+            border-radius: 12px;
+            overflow: hidden;
+            box-shadow: 0 4px 12px rgba(0, 0, 0, 0.08);
+            transition: transform 0.3s ease, box-shadow 0.3s ease;
+        }
+
+        .card:hover {
+            transform: translateY(-5px);
+            box-shadow: 0 10px 20px rgba(0, 0, 0, 0.12);
+        }
+
+        .card-header {
+            background-color: #FF9D00;
+            color: white;
+            padding: 12px 20px;
+            position: relative;
+        }
+
+        .card-vote {
+            position: absolute;
+            right: 15px;
+            top: 12px;
+            background: rgba(255, 255, 255, 0.25);
+            border-radius: 20px;
+            padding: 2px 10px;
+            font-size: 14px;
+            font-weight: bold;
+        }
+
+        .card-title {
+            font-size: 18px;
+            font-weight: 600;
+            margin: 0;
+            padding-right: 60px;
+        }
+
+        .card-body {
+            padding: 20px;
+        }
+
+        .card-image {
+            width: 100%;
+            height: 200px;
+            display: flex;
+            justify-content: center;
+            align-items: center;
+            overflow: hidden;
+            margin-bottom: 15px;
+            border-radius: 8px;
+            background-color: #f7f7f7;
+        }
+
+        .card-image img {
+            width: 100%;
+            height: 100%;
+            object-fit: cover;
+        }
+
+        .card-description {
+            font-size: 14px;
+            color: #444;
+            margin-bottom: 15px;
+            overflow: hidden;
+            display: -webkit-box;
+            -webkit-line-clamp: 6;
+            -webkit-box-orient: vertical;
+            height: 150px;
+        }
+
+        .card-link {
+            display: inline-block;
+            text-decoration: none;
+            background-color: #FF9D00;
+            color: white;
+            padding: 8px 16px;
+            border-radius: 6px;
+            font-weight: 500;
+            font-size: 14px;
+            transition: background-color 0.2s;
+        }
+
+        .card-link:hover {
+            background-color: #e58e00;
+        }
+
+        @media (max-width: 768px) {
+            .cards-container {
+                grid-template-columns: 1fr;
+            }
+        }
+    </style>
+</head>
+<body>
+    <div style="background-color: #f8f9fa; padding: 10px; border-radius: 6px; margin-bottom: 20px; text-align: center; color: #666;">
+        <p>⚠️ Note: the content on this page was generated by LLMs such as InternLM and may contain errors; please refer to the original papers.</p>
+    </div>
+    <h1>🔥 Hugging Face Hot Papers, February 2025 🚀</h1>
+
+    <div class="cards-container">
+        <!-- Paper card 1 -->
+        <div class="card">
+            <div class="card-header">
+                <h3 class="card-title">SmolLM2: When Smol Goes Big - Data-Centric Training of a Small Language Model</h3>
+                <span class="card-vote">193 votes</span>
+            </div>
+            <div class="card-body">
+                <div class="card-image">
+                    <img src="https://cdn.vansin.top/picgo/20250227080524.png" alt="SmolLM2 paper figure">
+                </div>
+                <div class="card-description">
+                    SmolLM2, from the Hugging Face team, is a "small" (1.7B-parameter) language model with state-of-the-art performance. To obtain strong results, the authors overtrain SmolLM2 on roughly 11 trillion tokens using a multi-stage training process that mixes web text with specialized math, code, and instruction-following data.
+                </div>
+                <a href="https://huggingface.co/papers/2502.02737" class="card-link" target="_blank">Read paper</a>
+            </div>
+        </div>
+
+        <!-- Paper card 2 -->
+        <div class="card">
+            <div class="card-header">
+                <h3 class="card-title">OmniHuman-1: Rethinking the Scaling-Up of One-Stage Conditioned Human Animation Models</h3>
+                <span class="card-vote">183 votes</span>
+            </div>
+            <div class="card-body">
+                <div class="card-image">
+                    <img src="https://cdn.vansin.top/papers/2502.01061/images/3583e10fff609ad1dc5362b5789ed46e6277c44eb0fafcb8492786a6ffe4c6c4.jpg" alt="OmniHuman-1 paper figure">
+                </div>
+                <div class="card-description">
+                    OmniHuman, from ByteDance, is a Diffusion Transformer-based framework that scales up data by mixing motion-related conditions into the training stage. It supports a wide range of portrait content (face close-up, portrait, half-body, full-body), handles talking and singing, and copes with human-object interaction and challenging body poses.
+                </div>
+                <a href="https://huggingface.co/papers/2502.01061" class="card-link" target="_blank">Read paper</a>
+            </div>
+        </div>
+
+        <!-- Paper card 3 -->
+        <div class="card">
+            <div class="card-header">
+                <h3 class="card-title">PhysiCo: The Stochastic Parrot on LLM's Shoulder - A Summative Assessment of Physical Concept Understanding</h3>
+                <span class="card-vote">181 votes</span>
+            </div>
+            <div class="card-body">
+                <div class="card-image">
+                    <img src="https://cdn.vansin.top/papers/2502.08946/images/aec57b459804ceb552566142d7b684195508a8fa8e3af958b54f4a3d097c64f5.jpg" alt="PhysiCo paper figure">
+                </div>
+                <div class="card-description">
+                    PhysiCo, jointly proposed by WeChat AI, HKUST, and JHU, is a summative assessment of physical concept understanding that uses grid-format inputs to describe physical phenomena abstractly. The study shows that state-of-the-art LLMs, including GPT-4o, o1, and Gemini 2.0, lag behind humans in understanding by about 40%.
+                </div>
+                <a href="https://huggingface.co/papers/2502.08946" class="card-link" target="_blank">Read paper</a>
+            </div>
+        </div>
+
+        <!-- Paper card 4 -->
+        <div class="card">
+            <div class="card-header">
+                <h3 class="card-title">MLGym: A New Framework and Benchmark for Advancing AI Research Agents</h3>
+                <span class="card-vote">161 votes</span>
+            </div>
+            <div class="card-body">
+                <div class="card-image">
+                    <img src="https://cdn.vansin.top/papers/2502.14499/images/19208b638d7455ed955799717c2443e5aa7cc80e30f73323684b46115c6b934a.jpg" alt="MLGym paper figure">
+                </div>
+                <div class="card-description">
+                    MLGym and MLGym-Bench, from Meta and collaborators, are a new framework and benchmark for evaluating and developing LLM agents on AI research tasks. It is the first Gym environment for machine learning (ML) tasks, comprising 13 open-ended AI research tasks from diverse domains such as computer vision, natural language processing, reinforcement learning, and game theory.
+                </div>
+                <a href="https://huggingface.co/papers/2502.14499" class="card-link" target="_blank">Read paper</a>
+            </div>
+        </div>
+
+        <!-- Paper card 5 -->
+        <div class="card">
+            <div class="card-header">
+                <h3 class="card-title">Qwen2.5-VL Technical Report</h3>
+                <span class="card-vote">143 votes</span>
+            </div>
+            <div class="card-body">
+                <div class="card-image">
+                    <img src="https://cdn.vansin.top/papers/2502.13923/images/da9a2f9c22c71c3441338bc0e9f9907ae353b21ddc8d5563c431e4bea693879a.jpg" alt="Qwen2.5-VL paper figure">
+                </div>
+                <div class="card-description">
+                    Qwen2.5-VL, from the Qwen team, is the latest flagship model of the Qwen vision-language series, demonstrating significant advances in both foundational capabilities and innovative features. It stands out for accurately localizing objects with bounding boxes or points, extracting structured data from documents, and providing detailed analysis of charts and layouts.
+                </div>
+                <a href="https://huggingface.co/papers/2502.13923" class="card-link" target="_blank">Read paper</a>
+            </div>
+        </div>
+
+        <!-- Paper card 6 -->
+        <div class="card">
+            <div class="card-header">
+                <h3 class="card-title">InfiniteHiP: Extending Language Model Context Up to 3 Million Tokens on a Single GPU</h3>
+                <span class="card-vote">141 votes</span>
+            </div>
+            <div class="card-body">
+                <div class="card-image">
+                    <img src="https://cdn.vansin.top/papers/2502.08910/images/c99cc39fdfc9cfdb68b43c5e8993e63ba650db8d0a43b133889f8805e429f585.jpg" alt="InfiniteHiP paper figure">
+                </div>
+                <div class="card-description">
+                    The authors introduce InfiniteHiP, a novel and practical LLM inference framework that accelerates processing by dynamically eliminating irrelevant context tokens with a modular hierarchical token-pruning algorithm. They also offload the key-value cache to host memory during inference, significantly reducing GPU memory pressure and enabling up to 3 million tokens on a single L40s 48GB GPU.
+                </div>
+                <a href="https://huggingface.co/papers/2502.08910" class="card-link" target="_blank">Read paper</a>
+            </div>
+        </div>
+
+        <!-- Paper card 7 -->
+        <div class="card">
+            <div class="card-header">
+                <h3 class="card-title">Can a 1B LLM Surpass a 405B LLM? Rethinking Compute-Optimal Test-Time Scaling</h3>
+                <span class="card-vote">137 votes</span>
+            </div>
+            <div class="card-body">
+                <div class="card-image">
+                    <img src="https://cdn.vansin.top/papers/2502.06703/images/2a5900e913f0319598d70ed801e63f5fe8267c8300330e70d4dbb475a6a2b194.jpg" alt="Compute-optimal TTS paper figure">
+                </div>
+                <div class="card-description">
+                    The study shows that with a compute-optimal TTS strategy, extremely small policy models can outperform much larger ones; for example, a 1B LLM can surpass a 405B LLM on MATH-500. Furthermore, on MATH-500 and AIME24, a 0.5B LLM outperforms GPT-4o, a 3B LLM surpasses a 405B LLM, and a 7B LLM beats o1 and DeepSeek-R1, all with higher inference efficiency.
+                </div>
+                <a href="https://huggingface.co/papers/2502.06703" class="card-link" target="_blank">Read paper</a>
+            </div>
+        </div>
+
+        <!-- Paper card 8 -->
+        <div class="card">
+            <div class="card-header">
+                <h3 class="card-title">LLM-Microscope: Uncovering the Hidden Role of Punctuation in the Context Memory of Transformers</h3>
+                <span class="card-vote">135 votes</span>
+            </div>
+            <div class="card-body">
+                <div class="card-image">
+                    <img src="https://cdn.vansin.top/papers/2502.15007/images/9eeadaf9829ae7ac4f8df5223c123326e33bb36f6183e8464643d797196b37cd.jpg" alt="LLM-Microscope paper figure">
+                </div>
+                <div class="card-description">
+                    This method, from AIRI, Skoltech, and collaborators, quantifies how large language models (LLMs) encode and store contextual information, revealing that tokens usually treated as minor (such as determiners and punctuation) carry a surprising amount of context. Removing these tokens consistently degrades performance on MMLU and BABILong-4k.
+                </div>
+                <a href="https://huggingface.co/papers/2502.15007" class="card-link" target="_blank">Read paper</a>
+            </div>
+        </div>
+
+        <!-- Paper card 9 -->
+        <div class="card">
+            <div class="card-header">
+                <h3 class="card-title">NSA: Hardware-Aligned and Natively Trainable Sparse Attention</h3>
+                <span class="card-vote">134 votes</span>
+            </div>
+            <div class="card-body">
+                <div class="card-image">
+                    <img src="https://cdn.vansin.top/papers/2502.11089/images/6e9f6ef180c548c840789d3a79c06ee82c58140191532cbdd53ae52743352662.jpg" alt="NSA paper figure">
+                </div>
+                <div class="card-description">
+                    NSA (Natively trainable Sparse Attention), from DeepSeek-AI and Peking University, is a mechanism that combines algorithmic innovations with hardware-aligned optimizations for efficient long-context modeling. On 64k-length sequences, NSA achieves substantial speedups over full attention across decoding, forward propagation, and backward propagation.
+                </div>
+                <a href="https://huggingface.co/papers/2502.11089" class="card-link" target="_blank">Read paper</a>
+            </div>
+        </div>
+
+        <!-- Paper card 10 -->
+        <div class="card">
+            <div class="card-header">
+                <h3 class="card-title">FailSafeQA: Expect the Unexpected - Long-Context QA for Finance</h3>
+                <span class="card-vote">124 votes</span>
+            </div>
+            <div class="card-body">
+                <div class="card-image">
+                    <img src="https://cdn.vansin.top/papers/2502.06329/images/bc9fed935f07e7289f351009c2968700d12ab7c40c8fec0c75a7e6d529196800.jpg" alt="FailSafeQA paper figure">
+                </div>
+                <div class="card-description">
+                    The team at Writer, Inc. proposes FailSafeQA, a new long-context financial benchmark designed to test the robustness and context awareness of LLMs against six variations of human-interface interaction in LLM-based query-answering systems for finance. Notably, the model rated most compliant failed to maintain robust predictions in 17% of test cases.
+                </div>
+                <a href="https://huggingface.co/papers/2502.06329" class="card-link" target="_blank">Read paper</a>
+            </div>
+        </div>
+
+        <!-- Paper card 11 -->
+        <div class="card">
+            <div class="card-header">
+                <h3 class="card-title">Scaling Test-Time Compute with Latent Reasoning: A Recurrent Depth Approach</h3>
+                <span class="card-vote">117 votes</span>
+            </div>
+            <div class="card-body">
+                <div class="card-image">
+                    <img src="https://cdn.vansin.top/papers/2502.05171/images/910fdab6af5b7b00f518cbc5e8df986b13da7716ffbb984990a8fc5f93e4d030.jpg" alt="Latent reasoning paper figure">
+                </div>
+                <div class="card-description">
+                    The authors study a novel language model architecture that scales test-time compute by reasoning implicitly in latent space. The model works by iterating a recurrent block, thereby unrolling to arbitrary depth at test time. They show the resulting model improves its performance on reasoning benchmarks, up to a compute load equivalent to 50 billion parameters.
+                </div>
+                <a href="https://huggingface.co/papers/2502.05171" class="card-link" target="_blank">Read paper</a>
+            </div>
+        </div>
+
+        <!-- Paper card 12 -->
+        <div class="card">
+            <div class="card-header">
+                <h3 class="card-title">SigLIP 2: Multilingual Vision-Language Encoders with Improved Semantic Understanding, Localization, and Dense Features</h3>
+                <span class="card-vote">115 votes</span>
+            </div>
+            <div class="card-body">
+                <div class="card-image">
+                    <img src="https://cdn.vansin.top/papers/2502.14786/images/c9ca17f56904f05d4df0bc520bc9325fda024cb6b1faebb5338294b286d6ca5e.jpg" alt="SigLIP 2 paper figure">
+                </div>
+                <div class="card-description">
+                    SigLIP 2, from Google DeepMind, is a new family of multilingual vision-language encoders. By extending the original image-text training objective with several techniques, SigLIP 2 models surpass their predecessors at all model scales in core capabilities, including zero-shot classification, image-text retrieval, and transfer performance when extracting visual representations for VLMs.
+                </div>
+                <a href="https://huggingface.co/papers/2502.14786" class="card-link" target="_blank">Read paper</a>
+            </div>
+        </div>
+
+        <!-- Paper card 13 -->
+        <div class="card">
+            <div class="card-header">
+                <h3 class="card-title">The Differences Between Direct Alignment Algorithms are a Blur</h3>
+                <span class="card-vote">111 votes</span>
+            </div>
+            <div class="card-body">
+                <div class="card-image">
+                    <img src="https://cdn.vansin.top/papers/2502.01237/images/699615725d3173dc4fe5f37afd3f643cbd417310701e31b3198686b3daf8fecf.jpg" alt="Direct alignment algorithms paper figure">
+                </div>
+                <div class="card-description">
+                    The study first shows that single-stage methods among direct alignment algorithms (DAAs) underperform two-stage methods. To address this, the authors introduce an explicit SFT phase into single-stage ORPO and ASFT, along with a β parameter that controls the strength of preference optimization. Their analysis indicates that the key factor is whether a method uses pairwise or pointwise objectives, rather than any specific implicit reward or loss function.
+                </div>
+                <a href="https://huggingface.co/papers/2502.01237" class="card-link" target="_blank">Read paper</a>
+            </div>
+        </div>
+
+        <!-- Paper card 14 -->
+        <div class="card">
+            <div class="card-header">
+                <h3 class="card-title">s1: Simple Test-Time Scaling</h3>
+                <span class="card-vote">106 votes</span>
+            </div>
+            <div class="card-body">
+                <div class="card-image">
+                    <img src="https://cdn.vansin.top/papers/2501.19393/images/2efe8daf66e63bf26718c1bc5bbf12ca79b080ce93679598d808fbad2ab7cf79.jpg" alt="s1 paper figure">
+                </div>
+                <div class="card-description">
+                    The authors seek the simplest approach to achieve test-time scaling and strong reasoning performance. They curate a small dataset, s1K, of 1,000 questions paired with reasoning traces, and use budget forcing to control test-time compute, letting the model double-check its answer and often fix incorrect reasoning steps. Their model s1 exceeds o1-preview on competition math questions by up to 27%.
+                </div>
+                <a href="https://huggingface.co/papers/2501.19393" class="card-link" target="_blank">Read paper</a>
+            </div>
+        </div>
+
+        <!-- Paper card 15 -->
+        <div class="card">
+            <div class="card-header">
+                <h3 class="card-title">SuperGPQA: Scaling LLM Evaluation across 285 Graduate Disciplines</h3>
+                <span class="card-vote">91 votes</span>
+            </div>
+            <div class="card-body">
+                <div class="card-image">
+                    <img src="https://cdn.vansin.top/papers/2502.14739/images/e86591de8d5c85fde8432fc47be52d2e35885764194b7a44f468599823b57cef.jpg" alt="SuperGPQA paper figure">
+                </div>
+                <div class="card-description">
+                    SuperGPQA, from ByteDance, is a comprehensive benchmark that evaluates LLM knowledge and reasoning across 285 graduate-level disciplines. It employs a human-LLM collaborative filtering mechanism that eliminates trivial or ambiguous questions through iterative refinement based on LLM responses and expert feedback. The reasoning-focused model DeepSeek-R1 achieved the highest accuracy on SuperGPQA, at 61.82%.
+                </div>
+                <a href="https://huggingface.co/papers/2502.14739" class="card-link" target="_blank">Read paper</a>
+            </div>
+        </div>
+
+        <!-- Paper card 16 -->
+        <div class="card">
+            <div class="card-header">
+                <h3 class="card-title">Goku: Flow-Based Video Generative Foundation Models</h3>
+                <span class="card-vote">88 votes</span>
+            </div>
+            <div class="card-body">
+                <div class="card-image">
+                    <img src="https://cdn.vansin.top/papers/2502.04896/images/7b9d82d237981450faae03a773d1634a11a6779aaa14147ac75f6f7fa17c4d8c.jpg" alt="Goku paper figure">
+                </div>
+                <div class="card-description">
+                    Goku, from The University of Hong Kong and ByteDance, is a family of state-of-the-art joint image-and-video generation models that leverages rectified flow Transformers to achieve industry-leading performance. Goku excels in both qualitative and quantitative evaluations, scoring 0.76 on GenEval and 83.65 on DPG-Bench for text-to-image generation, and 84.85 on VBench for text-to-video tasks.
+                </div>
+                <a href="https://huggingface.co/papers/2502.04896" class="card-link" target="_blank">Read paper</a>
+            </div>
+        </div>
+
+        <!-- Paper card 17 -->
+        <div class="card">
+            <div class="card-header">
+                <h3 class="card-title">SynthDetoxM: Modern LLMs are Few-Shot Parallel Detoxification Data Annotators</h3>
+                <span class="card-vote">85 votes</span>
+            </div>
+            <div class="card-body">
+                <div class="card-image">
+                    <img src="https://cdn.vansin.top/papers/2502.06394/images/699615725d3173dc4fe5f37afd3f643cbd417310701e31b3198686b3daf8fecf.jpg" alt="SynthDetoxM paper figure">
+                </div>
+                <div class="card-description">
+                    SynthDetoxM, from AIRI, Skoltech, and collaborators, is a manually collected and synthetically generated multilingual parallel text-detoxification dataset comprising 16,000 high-quality detoxified sentence pairs in German, French, Spanish, and Russian. Experiments show that models trained on the generated synthetic data outperform those trained on the human-annotated MultiParaDetox dataset.
+                </div>
+                <a href="https://huggingface.co/papers/2502.06394" class="card-link" target="_blank">Read paper</a>
+            </div>
+        </div>
+
+        <!-- Paper card 18 -->
+        <div class="card">
+            <div class="card-header">
+                <h3 class="card-title">SurveyX: Academic Survey Automation via Large Language Models</h3>
+                <span class="card-vote">83 votes</span>
+            </div>
+            <div class="card-body">
+                <div class="card-image">
+                    <img src="https://cdn.vansin.top/papers/2502.14776/images/b0b6ad3bac2e0254a89fcb257e9f480c61e7086480bd7f3d14850cdd2df9c6df.jpg" alt="SurveyX paper figure">
+                </div>
+                <div class="card-description">
+                    SurveyX, from Renmin University of China and collaborators, is an efficient and well-organized system for automated survey generation that decomposes survey writing into a preparation phase and a generation phase. By innovatively introducing online reference retrieval, an AttributeTree preprocessing method, and a re-polishing process, SurveyX significantly outperforms existing systems in both content quality and citation quality.
+                </div>
+                <a href="https://huggingface.co/papers/2502.14776" class="card-link" target="_blank">Read paper</a>
+            </div>
+        </div>
+
+        <!-- Paper card 19 -->
+        <div class="card">
+            <div class="card-header">
+                <h3 class="card-title">Large Language Diffusion Models</h3>
+                <span class="card-vote">80 votes</span>
+            </div>
+            <div class="card-body">
+                <div class="card-image">
+                    <img src="https://cdn.vansin.top/papers/2502.09992/images/6bee8d45604746c71819666e5d36ab7f32d63a9b199ba52bd33968dad96e5d61.jpg" alt="Language diffusion models paper figure">
+                </div>
+                <div class="card-description">
+                    The authors challenge the notion that autoregressive models (ARMs) are the cornerstone of LLMs by introducing LLaDA, a diffusion model trained from scratch that models distributions through a forward data-masking process and a reverse process parameterized by a vanilla Transformer. LLaDA 8B is competitive with strong LLMs such as LLaMA3 8B in in-context learning and addresses the reversal curse.
+                </div>
+                <a href="https://huggingface.co/papers/2502.09992" class="card-link" target="_blank">Read paper</a>
+            </div>
+        </div>
+
+        <!-- Paper card 20 -->
+        <div class="card">
+            <div class="card-header">
+                <h3 class="card-title">Soundwave: Less is More for Speech-Text Alignment in LLMs</h3>
+                <span class="card-vote">76 votes</span>
+            </div>
+            <div class="card-body">
+                <div class="card-image">
+                    <img src="https://cdn.vansin.top/papers/2502.12900/images/4ef92e8f784c18e74185aceb46b40bd200ccd64eb31785a154b88dde5dc2bc2a.jpg" alt="Soundwave paper figure">
+                </div>
+                <div class="card-description">
+                    Soundwave, from The Chinese University of Hong Kong, Shenzhen, targets two fundamental problems between speech and text: the representation-space gap and sequence-length inconsistency. It addresses them with an efficient training strategy and a novel architecture. Results show that Soundwave outperforms the advanced Qwen2-Audio on speech translation and AIR-Bench speech tasks while using only one-fiftieth of the training data.
+                </div>
+                <a href="https://huggingface.co/papers/2502.12900" class="card-link" target="_blank">Read paper</a>
+            </div>
+        </div>
+
+    </div>
+</body>
+</html>