import logging
from concurrent.futures import ThreadPoolExecutor, as_completed
from typing import Dict, List, Tuple
from urllib.parse import urlparse

from .engines.duckduckgo import DuckDuckGoEngine
from .engines.cooking import CookingSearchEngine
from .engines.multilingual import MultilingualCookingEngine
from .engines.video import VideoSearchEngine
from .engines.image import ImageSearchEngine
from .extractors.content import ContentExtractor
from .processors.cooking import CookingSearchProcessor
from .processors.language import LanguageProcessor
from .processors.sources import SourceAggregator
from .processors.enhanced import EnhancedContentProcessor
# Reranker removed - using simple relevance scoring for cooking content

logger = logging.getLogger(__name__)

class SearchCoordinator:
    """Coordinate multiple search strategies for comprehensive cooking information"""

    # Shared keyword list used by the lightweight cooking-relevance filters below
    COOKING_KEYWORDS = [
        'recipe', 'cooking', 'baking', 'food', 'ingredient', 'kitchen', 'chef', 'meal',
        'dish', 'cuisine', 'cook', 'bake', 'roast', 'grill', 'fry', 'boil', 'steam',
        'season', 'spice', 'herb', 'sauce', 'marinade', 'dressing'
    ]

    # Normalize language-detector output (e.g. 'EN'/'en') to the engines' lowercase codes
    LANGUAGE_CODE_MAP = {
        'EN': 'en', 'VI': 'vi', 'ZH': 'zh',
        'en': 'en', 'vi': 'vi', 'zh': 'zh'
    }

    def __init__(self, max_workers: int = 3):
        self.max_workers = max_workers
        
        # Initialize search engines
        self.duckduckgo_engine = DuckDuckGoEngine()
        self.cooking_engine = CookingSearchEngine()
        self.multilingual_engine = MultilingualCookingEngine()
        self.video_engine = VideoSearchEngine()
        self.image_engine = ImageSearchEngine()
        
        # Initialize processors
        self.content_extractor = ContentExtractor()
        self.cooking_processor = CookingSearchProcessor()
        self.language_processor = LanguageProcessor()
        self.source_aggregator = SourceAggregator()
        self.enhanced_processor = EnhancedContentProcessor()
        self.reranker = None  # No complex reranking needed for cooking content
        
        # Search strategies - prioritize cooking sources first
        self.strategies = [
            self._search_cooking_sources,
            self._search_duckduckgo,
            self._search_multilingual
        ]
    
    def search(self, query: str, num_results: int = 10, target_language: str = None) -> Tuple[str, Dict[int, str]]:
        """Execute comprehensive multilingual search with multiple strategies"""
        logger.info(f"Starting comprehensive multilingual search for: {query}")
        
        # Detect and enhance query for multiple languages
        enhanced_queries = self.language_processor.enhance_query(query, target_language)
        logger.info(f"Enhanced queries: {list(enhanced_queries.keys())}")
        
        # Execute search strategies in parallel
        all_results = []
        
        with ThreadPoolExecutor(max_workers=self.max_workers) as executor:
            # Submit search tasks for each language
            future_to_strategy = {}
            
            # Split the result budget across languages, requesting at least one result each
            per_language = max(1, num_results // max(1, len(enhanced_queries)))
            for lang, enhanced_query in enhanced_queries.items():
                for strategy in self.strategies:
                    future = executor.submit(strategy, enhanced_query, per_language, lang)
                    future_to_strategy[future] = f"{strategy.__name__}_{lang}"
            
            # Collect results
            for future in as_completed(future_to_strategy):
                strategy_name = future_to_strategy[future]
                try:
                    results = future.result()
                    if results:
                        all_results.extend(results)
                        logger.info(f"{strategy_name} found {len(results)} results")
                except Exception as e:
                    logger.error(f"{strategy_name} failed: {e}")
        
        # Remove duplicates and filter by language preference
        unique_results = self._remove_duplicates(all_results)
        if target_language:
            unique_results = self.language_processor.filter_by_language(unique_results, target_language)
        
        logger.info(f"Total unique results: {len(unique_results)}")
        
        # Extract content from URLs
        enriched_results = self._enrich_with_content(unique_results)
        
        # Simple cooking relevance filtering
        if enriched_results:
            cooking_keywords = self.COOKING_KEYWORDS
            relevant_results = []
            for result in enriched_results:
                title = result.get('title', '').lower()
                content = result.get('content', '').lower()
                if any(keyword in title or keyword in content for keyword in cooking_keywords):
                    relevant_results.append(result)
            
            if relevant_results:
                enriched_results = relevant_results
                logger.info(f"Filtered to {len(enriched_results)} cooking-relevant results")
        
        # Process results into comprehensive summary
        summary, url_mapping = self.cooking_processor.process_results(enriched_results, query)
        
        logger.info(f"Multilingual search completed: {len(url_mapping)} sources processed")
        return summary, url_mapping
    
    def _search_multilingual(self, query: str, num_results: int, language: str = None) -> List[Dict]:
        """Search using multilingual cooking engine"""
        try:
            if language:
                results = self.multilingual_engine.search_by_language(query, language, num_results)
            else:
                results = self.multilingual_engine.search(query, num_results)
            return results
        except Exception as e:
            logger.error(f"Multilingual search failed: {e}")
            return []
    
    def _search_duckduckgo(self, query: str, num_results: int, language: str = None) -> List[Dict]:
        """Search using DuckDuckGo engine"""
        try:
            results = self.duckduckgo_engine.search(query, num_results)
            return results
        except Exception as e:
            logger.error(f"DuckDuckGo search failed: {e}")
            return []
    
    def _search_cooking_sources(self, query: str, num_results: int, language: str = None) -> List[Dict]:
        """Search using cooking sources engine"""
        try:
            results = self.cooking_engine.search(query, num_results)
            return results
        except Exception as e:
            logger.error(f"Cooking sources search failed: {e}")
            return []
    
    def _remove_duplicates(self, results: List[Dict]) -> List[Dict]:
        """Remove duplicate results based on URL"""
        seen_urls = set()
        unique_results = []
        
        for result in results:
            url = result.get('url', '')
            if url and url not in seen_urls:
                seen_urls.add(url)
                unique_results.append(result)
        
        return unique_results
    
    def _enrich_with_content(self, results: List[Dict]) -> List[Dict]:
        """Enrich results with extracted content"""
        enriched_results = []
        
        # Extract content in parallel
        with ThreadPoolExecutor(max_workers=self.max_workers) as executor:
            # Submit content extraction tasks
            future_to_result = {
                executor.submit(self.content_extractor.extract, result['url']): result
                for result in results
            }
            
            # Collect enriched results
            for future in as_completed(future_to_result):
                original_result = future_to_result[future]
                try:
                    content = future.result()
                    if content:
                        enriched_result = original_result.copy()
                        enriched_result['content'] = content
                        enriched_results.append(enriched_result)
                except Exception as e:
                    logger.warning(f"Content extraction failed for {original_result['url']}: {e}")
                    # Still include result without content
                    enriched_results.append(original_result)
        
        return enriched_results
    
    def quick_search(self, query: str, num_results: int = 5) -> List[Dict]:
        """Quick search for basic results without content extraction"""
        logger.info(f"Quick search for: {query}")
        
        # Try cooking sources first for better relevance
        results = []
        try:
            cooking_results = self.cooking_engine.search(query, num_results)
            if cooking_results:
                results = cooking_results
                logger.info(f"Cooking engine found {len(results)} results")
        except Exception as e:
            logger.warning(f"Cooking engine failed: {e}")
        
        # If no cooking results, try DuckDuckGo
        if not results:
            logger.info("No cooking results, trying DuckDuckGo")
            results = self.duckduckgo_engine.search(query, num_results)
        
        # If no results, try with simplified query
        if not results:
            logger.warning("No results from search engines, trying simplified query")
            simplified_query = self._simplify_query(query)
            if simplified_query != query:
                # Try cooking sources first with simplified query
                try:
                    cooking_results = self.cooking_engine.search(simplified_query, num_results)
                    if cooking_results:
                        results = cooking_results
                        logger.info(f"Simplified cooking query '{simplified_query}' found {len(results)} results")
                except Exception as e:
                    logger.warning(f"Simplified cooking query failed: {e}")
                
                # If still no results, try DuckDuckGo with simplified query
                if not results:
                    results = self.duckduckgo_engine.search(simplified_query, num_results)
                    logger.info(f"Simplified DuckDuckGo query '{simplified_query}' found {len(results)} results")
        
        # Remove duplicates
        unique_results = self._remove_duplicates(results)
        
        # If we still have no results, create a basic fallback
        if not unique_results:
            logger.warning("No search results found, creating basic fallback")
            unique_results = self._create_fallback_results(query)
        
        logger.info(f"Quick search completed: {len(unique_results)} results")
        return unique_results
    
    def _simplify_query(self, query: str) -> str:
        """Simplify query to core cooking terms"""
        if not query:
            return ""
        
        # Extract key cooking terms
        words = query.split()
        
        # Keep cooking keywords and important terms
        cooking_keywords = [
            'recipe', 'cooking', 'baking', 'roasting', 'grilling', 'frying', 'boiling', 'steaming',
            'ingredients', 'seasoning', 'spices', 'herbs', 'sauce', 'marinade', 'dressing',
            'technique', 'method', 'temperature', 'timing', 'preparation', 'cooking time',
            'oven', 'stovetop', 'grill', 'pan', 'pot', 'skillet', 'knife', 'cutting',
            'vegetarian', 'vegan', 'gluten-free', 'dairy-free', 'keto', 'paleo', 'diet',
            'appetizer', 'main course', 'dessert', 'breakfast', 'lunch', 'dinner',
            'cuisine', 'italian', 'chinese', 'mexican', 'french', 'indian', 'thai'
        ]
        
        # Keep words that are cooking keywords or are important (longer than 3 chars)
        important_words = []
        for word in words:
            word_lower = word.lower()
            if word_lower in cooking_keywords or len(word) > 3:
                important_words.append(word)
        
        # If we have important words, use them; otherwise use first few words
        if important_words:
            return ' '.join(important_words[:5])  # Max 5 words
        else:
            return ' '.join(words[:3])  # Max 3 words
    
    def _create_fallback_results(self, query: str) -> List[Dict]:
        """Create basic fallback results when search fails"""
        # Create some basic cooking information URLs as fallback
        fallback_urls = [
            "https://www.allrecipes.com",
            "https://www.foodnetwork.com",
            "https://www.epicurious.com",
            "https://www.seriouseats.com",
            "https://www.bonappetit.com"
        ]
        
        results = []
        for i, url in enumerate(fallback_urls[:3]):  # Limit to 3 fallback results
            results.append({
                'url': url,
                'title': f"Cooking Information - {query}",
                'source': 'fallback',
                'composite_score': 0.3 - (i * 0.05)  # Decreasing score
            })
        
        return results
    
    def cooking_focus_search(self, query: str, num_results: int = 8) -> Tuple[str, Dict[int, str]]:
        """Cooking-focused search with enhanced processing"""
        logger.info(f"Cooking focus search for: {query}")
        
        # Use cooking engine primarily
        cooking_results = self.cooking_engine.search(query, num_results)
        
        # Add some general results for context
        general_results = self.duckduckgo_engine.search(query, 3)
        
        # Combine and deduplicate
        all_results = self._remove_duplicates(cooking_results + general_results)
        
        # Enrich with content
        enriched_results = self._enrich_with_content(all_results)
        
        # Simple cooking relevance filtering
        if enriched_results:
            cooking_keywords = self.COOKING_KEYWORDS
            relevant_results = []
            for result in enriched_results:
                title = result.get('title', '').lower()
                content = result.get('content', '').lower()
                if any(keyword in title or keyword in content for keyword in cooking_keywords):
                    relevant_results.append(result)
            
            if relevant_results:
                enriched_results = relevant_results
                logger.info(f"Filtered to {len(enriched_results)} cooking-relevant results")
        
        # Process with cooking focus
        summary, url_mapping = self.cooking_processor.process_results(enriched_results, query)
        
        logger.info(f"Cooking focus search completed: {len(url_mapping)} sources")
        return summary, url_mapping
    
    def multilingual_cooking_search(self, query: str, num_results: int = 10, target_language: str = None) -> Tuple[str, Dict[int, str]]:
        """Comprehensive multilingual cooking search"""
        logger.info(f"Multilingual cooking search for: {query} (target: {target_language})")
        
        # Detect source language
        source_language = self.language_processor.detect_language(query)
        logger.info(f"Detected source language: {source_language}")
        
        # Use multilingual search with language preference
        summary, url_mapping = self.search(query, num_results, target_language)
        
        logger.info(f"Multilingual cooking search completed: {len(url_mapping)} sources")
        return summary, url_mapping
    
    def comprehensive_search(self, query: str, num_results: int = 15, target_language: str = None, include_videos: bool = True) -> Tuple[str, Dict[int, str], Dict]:
        """Comprehensive search with maximum information extraction and detailed references"""
        logger.info(f"Starting comprehensive search for: {query} (target: {target_language})")
        
        # Detect source language
        source_language = self.language_processor.detect_language(query)
        logger.info(f"Detected source language: {source_language}")
        
        # Execute comprehensive search
        video_results = []
        
        # 1. Multilingual text search
        text_summary, text_url_mapping = self.search(query, num_results, target_language)
        
        # 2. Video search if requested
        if include_videos:
            try:
                video_results = self.video_search(query, num_results=5, target_language=target_language)
                logger.info(f"Video search found {len(video_results)} videos")
            except Exception as e:
                logger.warning(f"Video search failed: {e}")
        
        # 3. Aggregate all sources
        all_sources = []
        
        # Add text sources
        for url in text_url_mapping.values():
            # Find corresponding source data
            source_data = self._find_source_data(url, text_url_mapping)
            if source_data:
                all_sources.append(source_data)
        
        # Add video sources
        all_sources.extend(video_results)
        
        # 4. Process with enhanced content processor
        if all_sources:
            comprehensive_summary, detailed_mapping = self.enhanced_processor.process_comprehensive_content(all_sources, query)
        else:
            comprehensive_summary = text_summary
            detailed_mapping = text_url_mapping
        
        # 5. Create comprehensive source aggregation
        source_aggregation = self.source_aggregator.aggregate_sources(all_sources, video_results)
        
        # 6. Generate comprehensive references
        comprehensive_references = self.source_aggregator.create_comprehensive_references(all_sources, max_references=20)
        
        # 7. Add inline citations
        final_summary = self.enhanced_processor.create_inline_citations(comprehensive_summary, detailed_mapping)
        
        # 8. Add source statistics
        source_stats = self.enhanced_processor.generate_source_statistics(all_sources)
        
        # 9. Combine everything
        final_response = f"{final_summary}\n\n{comprehensive_references}\n\n{source_stats}"
        
        logger.info(f"Comprehensive search completed: {len(all_sources)} total sources processed")
        
        return final_response, detailed_mapping, source_aggregation
    
    def _find_source_data(self, url: str, url_mapping: Dict[int, str]) -> Dict:
        """Find source data for a given URL"""
        # This is a simplified version - ensure required fields always exist
        return {
            'url': url,
            'title': f"Source: {url}",
            'content': '',
            'domain': self._extract_domain(url),
            'type': 'text',
            'source_type': 'text',
            'language': 'en',
            'source_name': '',
            'platform': ''
        }
    
    def _extract_domain(self, url: str) -> str:
        """Extract domain from URL"""
        try:
            parsed = urlparse(url)
            domain = parsed.netloc.lower()
            if domain.startswith('www.'):
                domain = domain[4:]
            return domain
        except Exception:
            return ''
    
    def video_search(self, query: str, num_results: int = 3, target_language: str = None) -> List[Dict]:
        """Search for cooking videos across multiple platforms"""
        logger.info(f"Video search for: {query} (target: {target_language})")
        
        # Detect language if not provided
        if not target_language:
            target_language = self.language_processor.detect_language(query)
        
        # Map language codes
        search_language = self.LANGUAGE_CODE_MAP.get(target_language, 'en')
        
        # Search for videos
        raw_results = self.video_engine.search(query, num_results, search_language)
        
        # Simple video relevance filtering
        cooking_keywords = self.COOKING_KEYWORDS
        filtered_video_results = []
        for result in raw_results:
            title = result.get('title', '').lower()
            if any(keyword in title for keyword in cooking_keywords):
                filtered_video_results.append(result)
        
        # Validate and normalize results to avoid corrupted cards/links
        video_results = self._sanitize_video_results(filtered_video_results, limit=num_results)
        
        logger.info(f"Video search completed: {len(video_results)} videos found")
        return video_results
    
    def image_search(self, query: str, num_results: int = 3, target_language: str = None) -> List[Dict]:
        """Search for cooking-related images"""
        logger.info(f"Image search for: {query} (target: {target_language})")
        
        # Detect language if not provided
        if not target_language:
            target_language = self.language_processor.detect_language(query)
        
        # Map language codes
        search_language = self.LANGUAGE_CODE_MAP.get(target_language, 'en')
        
        # Search for images
        image_results = self.image_engine.search_cooking_images(query, num_results, search_language)
        
        logger.info(f"Image search completed: {len(image_results)} images found")
        return image_results

    def _sanitize_video_results(self, results: List[Dict], limit: int = 4) -> List[Dict]:
        """Ensure each video has a valid absolute https URL, reasonable title, and platform metadata.
        Drop unreachable/broken items and deduplicate by URL.
        """
        from urllib.parse import urlparse
        import requests
        clean: List[Dict] = []
        seen = set()
        for item in results or []:
            url = (item or {}).get('url', '')
            title = (item or {}).get('title', '').strip()
            if not url or not title:
                continue
            try:
                parsed = urlparse(url)
                if parsed.scheme not in ('http', 'https'):
                    continue
                if not parsed.netloc:
                    continue
                # Quick reachability check; YouTube often blocks HEAD, so skip strict checks for youtube domain
                host = parsed.netloc.lower()
                norm_url = url
                if 'youtube.com' not in host:
                    try:
                        r = requests.head(url, allow_redirects=True, timeout=3)
                        if r.status_code >= 400:
                            continue
                        norm_url = getattr(r, 'url', url) or url
                    except Exception:
                        # If HEAD blocked, try a light GET with small timeout
                        try:
                            r = requests.get(url, stream=True, timeout=4)
                            if r.status_code >= 400:
                                continue
                            norm_url = getattr(r, 'url', url) or url
                        except Exception:
                            continue
                if norm_url in seen:
                    continue
                seen.add(norm_url)
                platform = parsed.netloc.lower()
                if platform.startswith('www.'):
                    platform = platform[4:]
                clean.append({
                    'title': title,
                    'url': norm_url,
                    'thumbnail': item.get('thumbnail', ''),
                    'source': item.get('source', platform.split('.')[0]),
                    'platform': platform,
                    'language': item.get('language', 'en')
                })
                if len(clean) >= limit:
                    break
            except Exception:
                continue
        return clean
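
# Minimal usage sketch (the package path below is hypothetical; the module uses
# relative imports, so import it from its package rather than running this file
# directly, and note that live network access is required):
#
#     from search.coordinator import SearchCoordinator
#
#     coordinator = SearchCoordinator(max_workers=3)
#
#     # Fast lookup without content extraction
#     hits = coordinator.quick_search("crispy roast duck", num_results=5)
#
#     # Full multilingual pipeline: summary text plus numbered source mapping
#     summary, url_mapping = coordinator.search("crispy roast duck", num_results=10)
#     for idx, url in url_mapping.items():
#         print(f"[{idx}] {url}")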