File size: 5,715 Bytes
a17cfac
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
"""
Example usage of the DLSCA Test Dataset with streaming zarr support.

This example demonstrates how to use the custom dataset for both local
development and production with streaming capabilities.

Note: You may see "Repo card metadata block was not found" warnings - 
these are harmless and expected for local datasets without published cards.
"""

import os
from test import TestDataset, TestDownloadManager
import numpy as np

def example_local_usage():
    """Build the dataset locally and print a quick sanity overview.

    Returns the materialized train split so callers can reuse it.
    """
    print("=== Local Development Usage ===")

    # Materialize the dataset on disk, then open its train split.
    builder = TestDataset()
    builder.download_and_prepare()
    train_split = builder.as_dataset(split="train")

    print(f"Dataset size: {len(train_split)}")
    print(f"Features: {list(train_split.features.keys())}")

    # Peek at the first few records by index.
    for idx in range(3):
        record = train_split[idx]
        print(f"Example {idx}: labels={record['labels'][:2]}..., traces_len={len(record['traces'])}")

    return train_split

def example_streaming_usage():
    """Convert traces to a zipped zarr store and read it chunk by chunk.

    Returns the opened zarr array so callers can keep using it.
    """
    print("\n=== Streaming Usage ===")

    # The custom download manager handles zarr conversion and caching.
    manager = TestDownloadManager(dataset_name="dlsca_test")

    # Convert the raw numpy traces into chunked zarr and cache the zip.
    npy_path = os.path.join(os.path.dirname(__file__), "data", "traces.npy")
    cached_zip = manager.download_zarr_chunks(npy_path, chunk_size=100)
    print(f"Zarr chunks cached at: {cached_zip}")

    # Open the zarr store through the dataset helper.
    builder = TestDataset()
    traces = builder._load_zarr_from_zip(cached_zip)
    print(f"Zarr array shape: {traces.shape}")
    print(f"Zarr array chunks: {traces.chunks}")

    # Walk the array chunk by chunk, mimicking a streaming reader.
    step = 100
    total_chunks = -(-traces.shape[0] // step)  # ceiling division
    print(f"Total chunks: {total_chunks}")

    # Preview only the first 3 chunks; large datasets would stream all.
    for chunk_no in range(min(3, total_chunks)):
        lo = chunk_no * step
        hi = min(lo + step, traces.shape[0])
        piece = traces[lo:hi]
        print(f"Chunk {chunk_no}: shape={piece.shape}, range=[{lo}:{hi}]")

    return traces

def example_chunk_selection():
    """Slice a specific sample range out of the zarr traces for training.

    Returns the (traces, labels) pair for the selected range.
    """
    print("\n=== Chunk Selection Example ===")

    # Reuse the cached zarr zip (re-conversion is a no-op when cached).
    manager = TestDownloadManager()
    npy_path = os.path.join(os.path.dirname(__file__), "data", "traces.npy")
    cached_zip = manager.download_zarr_chunks(npy_path, chunk_size=100)

    builder = TestDataset()
    traces = builder._load_zarr_from_zip(cached_zip)
    label_array = np.load(os.path.join(os.path.dirname(__file__), "data", "labels.npy"))

    # Pull samples 200-299 as a training subset.
    window = slice(200, 300)
    trace_subset = traces[window]
    label_subset = label_array[window]

    print(f"Selected traces shape: {trace_subset.shape}")
    print(f"Selected labels shape: {label_subset.shape}")
    print(f"Sample labels: {label_subset[:3]}")

    return trace_subset, label_subset

def benchmark_access_patterns():
    """Benchmark sequential and random slice access: numpy vs zarr.

    Loads the same trace data both as an in-memory numpy array and as a
    chunked zarr array (via the cached zarr zip), times two access
    patterns on each backend, and verifies both return identical data.
    """
    print("\n=== Access Pattern Benchmark ===")

    import time

    # Both backends read from the same source file; compute the path once.
    traces_path = os.path.join(os.path.dirname(__file__), "data", "traces.npy")
    traces_np = np.load(traces_path)

    dl_manager = TestDownloadManager()
    zarr_zip_path = dl_manager.download_zarr_chunks(traces_path, chunk_size=100)
    dataset = TestDataset()
    traces_zarr = dataset._load_zarr_from_zip(zarr_zip_path)

    # Benchmark sequential access.
    print("Sequential access (first 300 samples):")

    # NumPy. perf_counter() is monotonic and high-resolution; time.time()
    # is wall-clock and can jump when the system clock is adjusted, which
    # makes it unsuitable for timing measurements.
    start = time.perf_counter()
    np_data = traces_np[:300]
    np_time = time.perf_counter() - start
    print(f"  NumPy: {np_time:.4f}s")

    # Zarr
    start = time.perf_counter()
    zarr_data = traces_zarr[:300]
    zarr_time = time.perf_counter() - start
    print(f"  Zarr:  {zarr_time:.4f}s")

    # Sanity check: both backends must yield exactly the same data.
    print(f"  Data identical: {np.array_equal(np_data, zarr_data)}")

    # Benchmark random access over a few scattered 50-sample slices.
    print("\nRandom chunk access (3 chunks):")
    indices = [50, 250, 450]

    # NumPy
    start = time.perf_counter()
    for idx in indices:
        _ = traces_np[idx:idx + 50]
    np_random_time = time.perf_counter() - start
    print(f"  NumPy: {np_random_time:.4f}s")

    # Zarr
    start = time.perf_counter()
    for idx in indices:
        _ = traces_zarr[idx:idx + 50]
    zarr_random_time = time.perf_counter() - start
    print(f"  Zarr:  {zarr_random_time:.4f}s")

if __name__ == "__main__":
    # Run all examples
    local_dataset = example_local_usage()
    zarr_array = example_streaming_usage()
    selected_data = example_chunk_selection()
    benchmark_access_patterns()
    
    print("\n=== Summary ===")
    print("✅ Local dataset loading works")
    print("✅ Zarr conversion and streaming works")
    print("✅ Chunk selection works")
    print("✅ Access pattern benchmarking works")
    print("\nThe dataset is ready for use with Hugging Face Hub!")
    print("Next steps:")
    print("1. Push this dataset to Hugging Face Hub")
    print("2. Use datasets.load_dataset('DLSCA/test') to access it")
    print("3. The streaming will automatically use zarr chunks for large traces")