add files
Browse files
- chatbot_icon_2.png +0 -0
- model_params.cfg +20 -0
- style.css +362 -0
- utils/generator.py +290 -0
- utils/logger.py +171 -0
- utils/retriever.py +29 -0
chatbot_icon_2.png
ADDED
model_params.cfg
ADDED
@@ -0,0 +1,20 @@
[generator]
PROVIDER = huggingface
MODEL = meta-llama/Meta-Llama-3-8B-Instruct
MAX_TOKENS = 768
TEMPERATURE = 0.2
INFERENCE_PROVIDER = novita
ORGANIZATION = GIZ

[reader]
TYPE = INF_PROVIDERS
INF_PROVIDER_MODEL = meta-llama/Llama-3.1-8B-Instruct
DEDICATED_MODEL = meta-llama/Llama-3.1-8B-Instruct
DEDICATED_ENDPOINT = https://qu2d8m6dmsollhly.us-east-1.aws.endpoints.huggingface.cloud
NVIDIA_MODEL = meta-llama/Llama-3.1-8B-Instruct
NVIDIA_ENDPOINT = https://huggingface.co/api/integrations/dgx/v1
MAX_TOKENS = 768
INF_PROVIDER = nebius

[app]
dropdown_default = Annual Consolidated OAG 2024
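For orientation, the [generator] block is the section that utils/generator.py reads at import time. A minimal sketch of loading it with configparser (the values shown in comments are the ones committed above):

import configparser

config = configparser.ConfigParser()
config.read("model_params.cfg")

provider = config.get("generator", "PROVIDER")              # huggingface
model = config.get("generator", "MODEL")                    # meta-llama/Meta-Llama-3-8B-Instruct
max_tokens = config.getint("generator", "MAX_TOKENS")       # 768
temperature = config.getfloat("generator", "TEMPERATURE")   # 0.2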
style.css
ADDED
@@ -0,0 +1,362 @@
/* :root {
    --user-image: url('https://ih1.redbubble.net/image.4776899543.6215/st,small,507x507-pad,600x600,f8f8f8.jpg');
} */

.warning-box {
    background-color: #fff3cd;
    border: 1px solid #ffeeba;
    border-radius: 4px;
    padding: 15px 20px;
    font-size: 14px;
    color: #856404;
    display: inline-block;
    margin-bottom: 15px;
}

.tip-box {
    background-color: #f0f9ff;
    border: 1px solid #80d4fa;
    border-radius: 4px;
    margin-top: 20px;
    padding: 15px 20px;
    font-size: 14px;
    display: inline-block;
    margin-bottom: 15px;
    width: auto;
    color: black !important;
}

body.dark .warning-box * {
    color: black !important;
}

body.dark .tip-box * {
    color: black !important;
}

.tip-box-title {
    font-weight: bold;
    font-size: 14px;
    margin-bottom: 5px;
}

.light-bulb {
    display: inline;
    margin-right: 5px;
}

.gr-box {border-color: #d6c37c}

#hidden-message{
    display: none;
}

.message{
    font-size: 14px !important;
}

a {
    text-decoration: none;
    color: inherit;
}

.card {
    background-color: white;
    border-radius: 10px;
    box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1);
    overflow: hidden;
    display: flex;
    flex-direction: column;
    margin: 20px;
}

.card-content {
    padding: 20px;
}

.card-content h2 {
    font-size: 14px !important;
    font-weight: bold;
    margin-bottom: 10px;
    margin-top: 0px !important;
    color: #dc2626 !important;
}

.card-content p {
    font-size: 12px;
    margin-bottom: 0;
}

.card-footer {
    background-color: #f4f4f4;
    font-size: 10px;
    padding: 10px;
    display: flex;
    justify-content: space-between;
    align-items: center;
}

.card-footer span {
    flex-grow: 1;
    text-align: left;
    color: #999 !important;
}

.pdf-link {
    display: inline-flex;
    align-items: center;
    margin-left: auto;
    text-decoration: none !important;
    font-size: 14px;
}

.message.user{
    /* background-color:#7494b0 !important; */
    border: none;
    /* color:white!important; */
}

.message.bot{
    /* background-color:#f2f2f7 !important; */
    border: none;
}

/* .gallery-item > div:hover{
    background-color:#7494b0 !important;
    color:white!important;
}
.gallery-item:hover{
    border:#7494b0 !important;
}
.gallery-item > div{
    background-color:white !important;
    color:#577b9b!important;
}
.label{
    color:#577b9b!important;
} */

/* .paginate{
    color:#577b9b!important;
} */

/* span[data-testid="block-info"]{
    background:none !important;
    color:#577b9b;
} */

/* Pseudo-element for the circularly cropped picture */
/* .message.bot::before {
    content: '';
    position: absolute;
    top: -10px;
    left: -10px;
    width: 30px;
    height: 30px;
    background-image: var(--user-image);
    background-size: cover;
    background-position: center;
    border-radius: 50%;
    z-index: 10;
}
*/

label.selected{
    background: none !important;
}

#submit-button{
    padding: 0px !important;
}

@media screen and (min-width: 1024px) {
    div#tab-examples{
        height: calc(100vh - 190px) !important;
        overflow-y: auto;
    }

    div#sources-textbox{
        height: calc(100vh - 190px) !important;
        overflow-y: auto !important;
    }

    div#tab-config{
        height: calc(100vh - 190px) !important;
        overflow-y: auto !important;
    }

    div#chatbot-row{
        height: calc(100vh - 90px) !important;
    }

    div#chatbot{
        height: calc(100vh - 170px) !important;
    }

    .max-height{
        height: calc(100vh - 90px) !important;
        overflow-y: auto;
    }

    /* .tabitem:nth-child(n+3) {
        padding-top:30px;
        padding-left:40px;
        padding-right:40px;
    } */
}

footer {
    visibility: hidden;
    display: none !important;
}

@media screen and (max-width: 767px) {
    /* Your mobile-specific styles go here */

    div#chatbot{
        height: 500px !important;
    }

    #submit-button{
        padding: 0px !important;
        min-width: 80px;
    }

    /* This will hide all list items */
    div.tab-nav button {
        display: none !important;
    }

    /* This will show only the first list item */
    div.tab-nav button:first-child {
        display: block !important;
    }

    /* This will show only the first list item */
    div.tab-nav button:nth-child(2) {
        display: block !important;
    }

    #right-panel button{
        display: block !important;
    }

    /* ... add other mobile-specific styles ... */
}

body.dark .card{
    background-color: #374151;
}

body.dark .card-content h2{
    color: #f4dbd3 !important;
}

body.dark .card-footer {
    background-color: #404652;
}

body.dark .card-footer span {
    color: white !important;
}

.doc-ref{
    color: #dc2626 !important;
    margin-right: 1px;
}

.tabitem{
    border: none !important;
}

.other-tabs > div{
    padding-left: 40px;
    padding-right: 40px;
    padding-top: 10px;
}

.gallery-item > div{
    white-space: normal !important; /* Allow the text to wrap */
    word-break: break-word !important; /* Break words to prevent overflow */
    overflow-wrap: break-word !important; /* Break long words if necessary */
}

span.chatbot > p > img{
    margin-top: 40px !important;
    max-height: none !important;
    max-width: 80% !important;
    border-radius: 0px !important;
}

.chatbot-caption{
    font-size: 11px;
    font-style: italic;
    color: #508094;
}

.ai-generated{
    font-size: 11px !important;
    font-style: italic;
    color: #73b8d4 !important;
}

.card-image > .card-content{
    background-color: #f1f7fa !important;
}

.tab-nav > button.selected{
    color: #4b8ec3;
    font-weight: bold;
    border: none;
}

.tab-nav{
    border: none !important;
}

#input-textbox > label > textarea{
    border-radius: 40px;
    padding-left: 30px;
    resize: none;
}

#input-message > div{
    border: none;
}

#dropdown-samples{
    /*! border:none !important; */
    /*! border-width:0px !important; */
    background: none !important;
}

#dropdown-samples > .container > .wrap{
    background-color: white;
}

#tab-examples > div > .form{
    border: none;
    background: none !important;
}

.a-doc-ref{
    text-decoration: none !important;
}
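How this stylesheet is loaded is not part of the commit; a plausible sketch, assuming the app passes the file contents to Gradio's css parameter (the element ids such as chatbot, input-textbox, and submit-button match the selectors above):

import gradio as gr

# Assumption: the app entry point (not in this commit) injects style.css like this.
with open("style.css") as f:
    custom_css = f.read()

with gr.Blocks(css=custom_css) as demo:
    chatbot = gr.Chatbot(elem_id="chatbot")
    msg = gr.Textbox(elem_id="input-textbox")
    send = gr.Button("Send", elem_id="submit-button")

demo.launch()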
utils/generator.py
ADDED
@@ -0,0 +1,290 @@
import logging
import asyncio
import json
import ast
from typing import List, Dict, Any, Union
from dotenv import load_dotenv

# LangChain imports
from langchain_huggingface import ChatHuggingFace, HuggingFaceEndpoint
from langchain_core.messages import SystemMessage, HumanMessage

import os
import configparser


def getconfig(configfile_path: str):
    """
    Read the config file
    Params
    ----------------
    configfile_path: file path of .cfg file
    """
    config = configparser.ConfigParser()
    try:
        config.read_file(open(configfile_path))
        return config
    except:
        logging.warning("config file not found")

# ---------------------------------------------------------------------
# Provider-agnostic authentication and configuration
# ---------------------------------------------------------------------

def get_auth(provider: str) -> dict:
    """Get authentication configuration for different providers"""
    auth_configs = {
        "openai": {"api_key": os.getenv("OPENAI_API_KEY")},
        "huggingface": {"api_key": os.getenv("HF_TOKEN")},
        "anthropic": {"api_key": os.getenv("ANTHROPIC_API_KEY")},
        "cohere": {"api_key": os.getenv("COHERE_API_KEY")},
    }

    if provider not in auth_configs:
        raise ValueError(f"Unsupported provider: {provider}")

    auth_config = auth_configs[provider]
    api_key = auth_config.get("api_key")

    if not api_key:
        raise RuntimeError(f"Missing API key for provider '{provider}'. Please set the appropriate environment variable.")

    return auth_config

# ---------------------------------------------------------------------
# Model / client initialization (non-exhaustive list of providers)
# ---------------------------------------------------------------------

config = getconfig("model_params.cfg")

PROVIDER = config.get("generator", "PROVIDER")
MODEL = config.get("generator", "MODEL")
MAX_TOKENS = int(config.get("generator", "MAX_TOKENS"))
TEMPERATURE = float(config.get("generator", "TEMPERATURE"))
INFERENCE_PROVIDER = config.get("generator", "INFERENCE_PROVIDER")
ORGANIZATION = config.get("generator", "ORGANIZATION")

# Set up authentication for the selected provider
auth_config = get_auth(PROVIDER)

def get_chat_model():
    """Initialize the appropriate LangChain chat model based on provider"""
    common_params = {
        "temperature": TEMPERATURE,
        "max_tokens": MAX_TOKENS,
    }

    # if PROVIDER == "openai":
    #     return ChatOpenAI(
    #         model=MODEL,
    #         openai_api_key=auth_config["api_key"],
    #         **common_params
    #     )
    # elif PROVIDER == "anthropic":
    #     return ChatAnthropic(
    #         model=MODEL,
    #         anthropic_api_key=auth_config["api_key"],
    #         **common_params
    #     )
    # elif PROVIDER == "cohere":
    #     return ChatCohere(
    #         model=MODEL,
    #         cohere_api_key=auth_config["api_key"],
    #         **common_params
    #     )
    if PROVIDER == "huggingface":
        # Initialize HuggingFaceEndpoint with explicit parameters
        llm = HuggingFaceEndpoint(
            repo_id=MODEL,
            huggingfacehub_api_token=auth_config["api_key"],
            task="text-generation",
            provider=INFERENCE_PROVIDER,
            server_kwargs={"bill_to": ORGANIZATION},
            temperature=TEMPERATURE,
            max_new_tokens=MAX_TOKENS
        )
        return ChatHuggingFace(llm=llm)
    else:
        raise ValueError(f"Unsupported provider: {PROVIDER}")

# Initialize provider-agnostic chat model
chat_model = get_chat_model()

# ---------------------------------------------------------------------
# Context processing - may need further refinement (i.e. to manage other data sources)
# ---------------------------------------------------------------------
# def extract_relevant_fields(retrieval_results: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
#     """
#     Extract only relevant fields from retrieval results.
#
#     Args:
#         retrieval_results: List of JSON objects from retriever
#
#     Returns:
#         List of processed objects with only relevant fields
#     """
#
#     retrieval_results = ast.literal_eval(retrieval_results)
#
#     processed_results = []
#
#     for result in retrieval_results:
#         # Extract the answer content
#         answer = result.get('answer', '')
#
#         # Extract document identification from metadata
#         metadata = result.get('answer_metadata', {})
#         doc_info = {
#             'answer': answer,
#             'filename': metadata.get('filename', 'Unknown'),
#             'page': metadata.get('page', 'Unknown'),
#             'year': metadata.get('year', 'Unknown'),
#             'source': metadata.get('source', 'Unknown'),
#             'document_id': metadata.get('_id', 'Unknown')
#         }
#
#         processed_results.append(doc_info)
#
#     return processed_results
#
# def format_context_from_results(processed_results: List[Dict[str, Any]]) -> str:
#     """
#     Format processed retrieval results into a context string for the LLM.
#
#     Args:
#         processed_results: List of processed objects with relevant fields
#
#     Returns:
#         Formatted context string
#     """
#     if not processed_results:
#         return ""
#
#     context_parts = []
#
#     for i, result in enumerate(processed_results, 1):
#         doc_reference = f"[Document {i}: {result['filename']}"
#         if result['page'] != 'Unknown':
#             doc_reference += f", Page {result['page']}"
#         if result['year'] != 'Unknown':
#             doc_reference += f", Year {result['year']}"
#         doc_reference += "]"
#
#         context_part = f"{doc_reference}\n{result['answer']}\n"
#         context_parts.append(context_part)
#
#     return "\n".join(context_parts)

# ---------------------------------------------------------------------
# Core generation function for both Gradio UI and MCP
# ---------------------------------------------------------------------
async def _call_llm(messages: list) -> str:
    """
    Provider-agnostic LLM call using LangChain.

    Args:
        messages: List of LangChain message objects

    Returns:
        Generated response content as string
    """
    try:
        # Use async invoke for better performance
        response = await chat_model.ainvoke(messages)
        print(response)
        return response.content
        # return response.content.strip()
    except Exception as e:
        logging.exception(f"LLM generation failed with provider '{PROVIDER}' and model '{MODEL}': {e}")
        raise

def build_messages(question: str, context: str) -> list:
    """
    Build messages in LangChain format.

    Args:
        question: The user's question
        context: The relevant context for answering

    Returns:
        List of LangChain message objects
    """
    system_content = (
        """
        You are an expert assistant. Your task is to generate accurate, helpful responses using only the
        information contained in the "CONTEXT" provided.

        Instructions:
        - Answer based only on provided context: Use only the information present in the retrieved_paragraphs below. Do not use any external knowledge or make assumptions beyond what is explicitly stated.
        - Language matching: Respond in the same language as the user's query.
        - Handle missing information: If the retrieved paragraphs do not contain sufficient information to answer the query, respond with "I don't know" or equivalent in the query language. If information is incomplete, state what you know and acknowledge limitations.
        - Be accurate and specific: When information is available, provide clear, specific answers. Include relevant details, useful facts, and numbers from the context.
        - Stay focused: Answer only what is asked. Do not provide additional information not requested.
        - Structure your response effectively:
            * Do not just summarize each passage one by one. Group your summaries to highlight the key parts in the explanation.
            * Use bullet points and lists when it makes sense to improve readability.
            * You do not need to use every passage. Only use the ones that help answer the question.
        - Format your response properly: Use markdown formatting (bullet points, numbered lists, headers) to make your response clear and easy to read. Example: <br> for linebreaks

        Input Format:
        - Query: {query}
        - Retrieved Paragraphs: {retrieved_paragraphs}

        Generate your response based on these guidelines.

        """
    )

    user_content = f"### CONTEXT\n{context}\n\n### USER QUESTION\n{question}"

    return [
        SystemMessage(content=system_content),
        HumanMessage(content=user_content)
    ]


async def generate(query: str, context: Union[str, List[Dict[str, Any]]]) -> str:
    """
    Generate an answer to a query using provided context through RAG.

    This function takes a user query and relevant context, then uses a language model
    to generate a comprehensive answer based on the provided information.

    Args:
        query (str): User query
        context (list): List of retrieval result objects (dictionaries)
    Returns:
        str: The generated answer based on the query and context
    """
    if not query.strip():
        return "Error: Query cannot be empty"

    # Handle both string context (for Gradio UI) and list context (from retriever)
    if isinstance(context, list):
        if not context:
            return "Error: No retrieval results provided"

        # # Process the retrieval results
        # processed_results = extract_relevant_fields(context)
        formatted_context = context

        # if not formatted_context.strip():
        #     return "Error: No valid content found in retrieval results"

    elif isinstance(context, str):
        if not context.strip():
            return "Error: Context cannot be empty"
        formatted_context = context

    else:
        return "Error: Context must be either a string or list of retrieval results"

    try:
        messages = build_messages(query, formatted_context)
        answer = await _call_llm(messages)

        return answer

    except Exception as e:
        logging.exception("Generation failed")
        return f"Error: {str(e)}"
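A minimal usage sketch for the generate coroutine above, assuming HF_TOKEN is set and model_params.cfg is in the working directory (both are read at import time); the context string here is illustrative only:

import asyncio
from utils.generator import generate

# Illustrative context; in the app this comes from the retriever.
context = "[Document 1: annual_report.pdf, Page 3]\nTotal audited expenditure rose in 2024."
answer = asyncio.run(generate("What happened to audited expenditure in 2024?", context))
print(answer)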
utils/logger.py
ADDED
@@ -0,0 +1,171 @@
import json
import logging
from datetime import datetime
from uuid import uuid4
import requests
from pathlib import Path
from datasets import load_dataset, Dataset
import os
from huggingface_hub import CommitScheduler, HfApi
import random

class ChatLogger:
    def __init__(self, scheduler):
        """Initialize the chat logger with paths and configurations"""
        if not scheduler:
            raise ValueError("Scheduler is required")

        self.scheduler = scheduler
        self.json_dataset_dir = Path(scheduler.folder_path)

        # Ensure the directory exists
        try:
            self.json_dataset_dir.mkdir(parents=True, exist_ok=True)
            logging.info(f"Using dataset directory at: {self.json_dataset_dir}")
        except Exception as e:
            logging.error(f"Error creating dataset directory: {str(e)}")
            raise

        self.logs_path = self.json_dataset_dir / f"logs-{uuid4()}.jsonl"
        logging.info(f"Log file will be created at: {self.logs_path}")

    def get_client_ip(self, request=None):
        """Get the client IP address from the request context"""
        try:
            if request:
                # Try different headers that might contain the real IP
                ip = request.client.host
                # Check for proxy headers
                forwarded_for = request.headers.get('X-Forwarded-For')
                if forwarded_for:
                    # X-Forwarded-For can contain multiple IPs - first one is the client
                    ip = forwarded_for.split(',')[0].strip()

                logging.debug(f"Client IP detected: {ip}")
                return ip
        except Exception as e:
            logging.error(f"Error getting client IP: {e}")
            return "127.0.0.1"

    def get_client_location(self, ip_address):
        """Get geolocation info using ipapi.co"""
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
        }
        try:
            response = requests.get(
                f'https://ipapi.co/{ip_address}/json/',
                headers=headers,
                timeout=5
            )
            if response.status_code == 200:
                data = response.json()
                # Add random noise between -0.01 and 0.01 degrees (roughly ±1km)
                lat = data.get('latitude')
                lon = data.get('longitude')
                if lat is not None and lon is not None:
                    lat += random.uniform(-0.01, 0.01)
                    lon += random.uniform(-0.01, 0.01)

                return {
                    'city': data.get('city'),
                    'region': data.get('region'),
                    'country': data.get('country_name'),
                    'latitude': lat,
                    'longitude': lon
                }
            elif response.status_code == 429:
                logging.warning(f"Rate limit exceeded for IP lookup")
                return None
            else:
                logging.error(f"Error in IP lookup: Status code {response.status_code}")
                return None

        except requests.exceptions.RequestException as e:
            logging.error(f"Request failed in IP lookup: {str(e)}")
            return None

    def create_log_entry(self, query, answer, retrieved_content, feedback=None, request=None):
        """Create a structured log entry with all required fields"""
        timestamp = datetime.now().timestamp()

        # Get client location if request is provided
        ip = self.get_client_ip(request) if request else None
        location = self.get_client_location(ip) if ip else None

        log_entry = {
            "record_id": str(uuid4()),
            "session_id": str(uuid4()),  # In practice, this should be passed in from the session
            "time": str(timestamp),
            "client_location": location,
            "question": query,
            "answer": answer,
            "retrieved_content": retrieved_content if isinstance(retrieved_content, list) else [retrieved_content],
            "feedback": feedback
        }

        return log_entry

    def cleanup_local_files(self):
        """Delete local JSON files after successful upload"""
        try:
            # List all files in json_dataset directory
            for file in self.json_dataset_dir.glob("*.json*"):
                try:
                    file.unlink()  # Delete file
                    logging.info(f"Deleted local file: {file}")
                except Exception as e:
                    logging.error(f"Error deleting file {file}: {e}")

            # Optionally remove the directory if empty
            if not any(self.json_dataset_dir.iterdir()):
                self.json_dataset_dir.rmdir()
                logging.info("Removed empty json_dataset directory")
        except Exception as e:
            logging.error(f"Error in cleanup: {e}")

    def save_local(self, log_entry):
        """Save log entry to local JSONL file"""
        try:
            # Ensure parent directory exists
            self.logs_path.parent.mkdir(parents=True, exist_ok=True)

            # Reorder fields for consistency
            field_order = [
                "record_id",
                "session_id",
                "time",
                "client_location",
                "question",
                "answer",
                "retrieved_content",
                "feedback"
            ]
            ordered_logs = {k: log_entry.get(k) for k in field_order if k in log_entry}

            with self.scheduler.lock:
                with open(self.logs_path, 'a') as f:
                    json.dump(ordered_logs, f)
                    f.write('\n')
                    logging.info(f"Log entry saved to {self.logs_path}")

            # After successful write, trigger cleanup
            self.cleanup_local_files()
            return True
        except Exception as e:
            logging.error(f"Error saving to local file: {str(e)}")
            return False

    def log(self, query, answer, retrieved_content, feedback=None, request=None):
        """Main logging method that handles both local and HF storage"""
        # Create log entry
        log_entry = self.create_log_entry(
            query=query,
            answer=answer,
            retrieved_content=retrieved_content,
            feedback=feedback,
            request=request
        )
        logging.info("Logging results completed")
        # Save locally with thread safety
        return self.save_local(log_entry)
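ChatLogger relies on a huggingface_hub CommitScheduler for its folder_path and lock. A hedged wiring sketch follows; the dataset repo id and folder name are placeholders, not values taken from this commit:

from huggingface_hub import CommitScheduler
from utils.logger import ChatLogger

# Placeholder repo id / folder; the real values are configured in the app, not here.
scheduler = CommitScheduler(
    repo_id="your-org/chatbot-logs",
    repo_type="dataset",
    folder_path="json_dataset",
    every=5,  # push pending files every 5 minutes
)

chat_logger = ChatLogger(scheduler)
chat_logger.log(
    query="What did the report cover?",
    answer="The report covered ...",
    retrieved_content=["[Document 1] ..."],
)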
utils/retriever.py
ADDED
@@ -0,0 +1,29 @@
import gradio as gr
from gradio_client import Client
import logging

def retrieve_paragraphs(query, category=None):
    """Connect to retriever and retrieve paragraphs"""
    try:
        # Call the API with the uploaded file
        client = Client("https://giz-chatfed-retriever0-3.hf.space/")
        print("calling retriever at https://giz-chatfed-retriever0-3.hf.space/", flush=True)
        logging.info("calling retriever at https://giz-chatfed-retriever0-3.hf.space")
        filter_metadata = None
        if category:
            filter_metadata = {'category': category}
        result = client.predict(
            query=query,
            collection_name="Humboldt",
            filter_metadata=filter_metadata,
            api_name="/retrieve"
        )
        return result

    except Exception as e:
        error_msg = f"Error retrieving paragraphs: {str(e)}"
        return (
            error_msg,
            gr.update(visible=True),   # upload_status
            gr.update(visible=False)   # results_table
        )
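Putting the pieces together, a hedged end-to-end sketch of the retrieve-then-generate flow; it assumes the retriever Space is reachable and HF_TOKEN is set, and the question is illustrative:

import asyncio
from utils.retriever import retrieve_paragraphs
from utils.generator import generate

question = "What were the main audit findings for 2024?"
paragraphs = retrieve_paragraphs(question)            # list of retrieval results from the Space
answer = asyncio.run(generate(question, paragraphs))  # grounded answer from the LLM
print(answer)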